1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements extra semantic analysis beyond what is enforced 10 // by the C type system. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/AST/APValue.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/Attr.h" 17 #include "clang/AST/AttrIterator.h" 18 #include "clang/AST/CharUnits.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclBase.h" 21 #include "clang/AST/DeclCXX.h" 22 #include "clang/AST/DeclObjC.h" 23 #include "clang/AST/DeclarationName.h" 24 #include "clang/AST/EvaluatedExprVisitor.h" 25 #include "clang/AST/Expr.h" 26 #include "clang/AST/ExprCXX.h" 27 #include "clang/AST/ExprObjC.h" 28 #include "clang/AST/ExprOpenMP.h" 29 #include "clang/AST/FormatString.h" 30 #include "clang/AST/NSAPI.h" 31 #include "clang/AST/NonTrivialTypeVisitor.h" 32 #include "clang/AST/OperationKinds.h" 33 #include "clang/AST/RecordLayout.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include 
"clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include "clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSet.h" 79 #include "llvm/ADT/StringSwitch.h" 80 #include "llvm/ADT/Triple.h" 81 #include "llvm/Support/AtomicOrdering.h" 82 #include "llvm/Support/Casting.h" 83 #include "llvm/Support/Compiler.h" 84 #include "llvm/Support/ConvertUTF.h" 85 #include "llvm/Support/ErrorHandling.h" 86 #include "llvm/Support/Format.h" 87 #include "llvm/Support/Locale.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/SaveAndRestore.h" 90 #include "llvm/Support/raw_ostream.h" 91 #include <algorithm> 92 #include <bitset> 93 #include <cassert> 94 #include <cctype> 95 #include <cstddef> 96 #include <cstdint> 97 #include <functional> 98 #include <limits> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 
109 Context.getTargetInfo()); 110 } 111 112 /// Checks that a call expression's argument count is at least the desired 113 /// number. This is useful when doing custom type-checking on a variadic 114 /// function. Returns true on error. 115 static bool checkArgCountAtLeast(Sema &S, CallExpr *Call, 116 unsigned MinArgCount) { 117 unsigned ArgCount = Call->getNumArgs(); 118 if (ArgCount >= MinArgCount) 119 return false; 120 121 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args) 122 << 0 /*function call*/ << MinArgCount << ArgCount 123 << Call->getSourceRange(); 124 } 125 126 /// Checks that a call expression's argument count is the desired number. 127 /// This is useful when doing custom type-checking. Returns true on error. 128 static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) { 129 unsigned ArgCount = Call->getNumArgs(); 130 if (ArgCount == DesiredArgCount) 131 return false; 132 133 if (checkArgCountAtLeast(S, Call, DesiredArgCount)) 134 return true; 135 assert(ArgCount > DesiredArgCount && "should have diagnosed this"); 136 137 // Highlight all the excess arguments. 138 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(), 139 Call->getArg(ArgCount - 1)->getEndLoc()); 140 141 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args) 142 << 0 /*function call*/ << DesiredArgCount << ArgCount 143 << Call->getArg(1)->getSourceRange(); 144 } 145 146 /// Check that the first argument to __builtin_annotation is an integer 147 /// and the second argument is a non-wide string literal. 148 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 149 if (checkArgCount(S, TheCall, 2)) 150 return true; 151 152 // First argument should be an integer. 
153 Expr *ValArg = TheCall->getArg(0); 154 QualType Ty = ValArg->getType(); 155 if (!Ty->isIntegerType()) { 156 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 157 << ValArg->getSourceRange(); 158 return true; 159 } 160 161 // Second argument should be a constant string. 162 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 163 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 164 if (!Literal || !Literal->isAscii()) { 165 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 166 << StrArg->getSourceRange(); 167 return true; 168 } 169 170 TheCall->setType(Ty); 171 return false; 172 } 173 174 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 175 // We need at least one argument. 176 if (TheCall->getNumArgs() < 1) { 177 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 178 << 0 << 1 << TheCall->getNumArgs() 179 << TheCall->getCallee()->getSourceRange(); 180 return true; 181 } 182 183 // All arguments should be wide string literals. 184 for (Expr *Arg : TheCall->arguments()) { 185 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 186 if (!Literal || !Literal->isWide()) { 187 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 188 << Arg->getSourceRange(); 189 return true; 190 } 191 } 192 193 return false; 194 } 195 196 /// Check that the argument to __builtin_addressof is a glvalue, and set the 197 /// result type to the corresponding pointer type. 198 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 199 if (checkArgCount(S, TheCall, 1)) 200 return true; 201 202 ExprResult Arg(TheCall->getArg(0)); 203 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 204 if (ResultType.isNull()) 205 return true; 206 207 TheCall->setArg(0, Arg.get()); 208 TheCall->setType(ResultType); 209 return false; 210 } 211 212 /// Check that the argument to __builtin_function_start is a function. 
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  // The argument must constant-evaluate to a reference to a function
  // declaration; anything else is rejected below.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  // Returns true on error: the helper returns false (with a diagnostic,
  // since Complain=true) when the function's address cannot be taken.
  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Enums and bool are integer types in the AST but are rejected here.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  // Max representable alignment is one bit below the source type's width.
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    // Alignment of 1 is a no-op for align_up/align_down and always-true for
    // is_aligned; warn but do not reject.
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

/// Type-check __builtin_{add,sub,mul}_overflow(a, b, res): the first two
/// arguments must be integers and the third a pointer to a non-const integer.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}
namespace {
/// Helper that synthesizes the sequence of printing calls implementing
/// __builtin_dump_struct: it walks a record's bases and fields and builds
/// calls to the user-supplied print function for each of them.
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  // All synthesized expressions, in evaluation order; wrapped up at the end
  // by buildWrapper().
  SmallVector<Expr *, 32> Actions;
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  // Wrap an expression in an OpaqueValueExpr and record it so it is
  // evaluated exactly once even if referenced multiple times.
  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  /// Build a call to the print function with the trailing user arguments
  /// (everything after the struct pointer and the function itself),
  /// followed by the format string and then Exprs. Returns true on error.
  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  // Returns nullptr for depth 0 (callers emit a shorter format string then).
  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  /// Append a printf conversion specifier suitable for type T to Str.
  /// Returns false if no specifier could be determined for T.
  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context,
                          /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  /// Print "<indent><type name>" then the record's value; used for the
  /// top-level struct and for base-class subobjects (which have no field
  /// name of their own).
  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        // Cast the record pointer/lvalue to the base type so the recursive
        // dump sees the base subobject.
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      // Indirect fields let us reach members of anonymous structs/unions.
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  // Package all recorded actions into a PseudoObjectExpr keyed on TheCall,
  // and propagate the wrapper's type/value kind back onto TheCall.
  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

/// Semantic checking and lowering for __builtin_dump_struct(ptr, fn, ...):
/// validates the struct pointer and the callable, then synthesizes the
/// sequence of print calls via BuiltinDumpStructGenerator.
static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}
/// Type-check __builtin_call_with_static_chain(call, chain): the first
/// argument must be a plain (non-block, non-builtin) function call and the
/// second a pointer; the builtin takes on the wrapped call's result.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  // Exact class check: subclasses (e.g. member calls) are rejected too.
  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin itself the function type
  // '<wrapped return type> (<wrapped return type>, <chain pointer type>)'
  // so the outer call type-checks.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // The builtin call is transparent: it has the wrapped call's type,
  // value kind and object kind.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

/// Walks a scanf format string and diagnoses '%<N>s'/'%<N>c'/'%<N>[...]'
/// conversions whose constant field width (plus NUL where applicable)
/// exceeds the corresponding destination buffer's size.
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size).
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  // Returning true keeps the format-string walk going after each specifier.
  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    // %s and %[...] append a terminating NUL beyond the field width; %c does
    // not. All other conversions are not checked here.
    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    // Only a compile-time-constant field width can be compared to the
    // destination size.
    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};
/// Walks a printf format string and accumulates a conservative LOWER bound
/// on the number of bytes the formatted output will occupy (including the
/// terminating NUL), for fortified sprintf-style diagnostics.
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  // Start with the literal text length (up to the first NUL, if any) plus
  // the NUL terminator; specifier handling below adjusts from there.
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      // The string could be empty, so only the field width is a lower bound.
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // A '+' or ' ' flag forces a sign/space character for signed conversions.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // The constructor counted the specifier's own characters as literal
    // text; remove them now that the conversion's size has been added.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  // A non-constant (e.g. '*') field width contributes nothing to the bound.
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %d
      case analyze_format_string::ConversionSpecifier::OArg: // %D
      case analyze_format_string::ConversionSpecifier::uArg: // %d
      case analyze_format_string::ConversionSpecifier::UArg: // %D
      case analyze_format_string::ConversionSpecifier::xArg: // %d
      case analyze_format_string::ConversionSpecifier::XArg: // %D
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %d
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};
949 switch (FW.getHowSpecified()) { 950 case analyze_format_string::OptionalAmount::NotSpecified: 951 switch (FS.getConversionSpecifier().getKind()) { 952 default: 953 break; 954 case analyze_format_string::ConversionSpecifier::dArg: // %d 955 case analyze_format_string::ConversionSpecifier::DArg: // %D 956 case analyze_format_string::ConversionSpecifier::iArg: // %i 957 Precision = 1; 958 break; 959 case analyze_format_string::ConversionSpecifier::oArg: // %d 960 case analyze_format_string::ConversionSpecifier::OArg: // %D 961 case analyze_format_string::ConversionSpecifier::uArg: // %d 962 case analyze_format_string::ConversionSpecifier::UArg: // %D 963 case analyze_format_string::ConversionSpecifier::xArg: // %d 964 case analyze_format_string::ConversionSpecifier::XArg: // %D 965 Precision = 1; 966 break; 967 case analyze_format_string::ConversionSpecifier::fArg: // %f 968 case analyze_format_string::ConversionSpecifier::FArg: // %F 969 case analyze_format_string::ConversionSpecifier::eArg: // %e 970 case analyze_format_string::ConversionSpecifier::EArg: // %E 971 case analyze_format_string::ConversionSpecifier::gArg: // %g 972 case analyze_format_string::ConversionSpecifier::GArg: // %G 973 Precision = 6; 974 break; 975 case analyze_format_string::ConversionSpecifier::pArg: // %d 976 Precision = 1; 977 break; 978 } 979 break; 980 case analyze_format_string::OptionalAmount::Constant: 981 Precision = FW.getConstantAmount(); 982 break; 983 default: 984 break; 985 } 986 return Precision; 987 } 988 }; 989 990 } // namespace 991 992 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, 993 CallExpr *TheCall) { 994 if (TheCall->isValueDependent() || TheCall->isTypeDependent() || 995 isConstantEvaluated()) 996 return; 997 998 bool UseDABAttr = false; 999 const FunctionDecl *UseDecl = FD; 1000 1001 const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>(); 1002 if (DABAttr) { 1003 UseDecl = DABAttr->getFunction(); 1004 assert(UseDecl && "Missing FunctionDecl 
in DiagnoseAsBuiltin attribute!"); 1005 UseDABAttr = true; 1006 } 1007 1008 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true); 1009 1010 if (!BuiltinID) 1011 return; 1012 1013 const TargetInfo &TI = getASTContext().getTargetInfo(); 1014 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType()); 1015 1016 auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> { 1017 // If we refer to a diagnose_as_builtin attribute, we need to change the 1018 // argument index to refer to the arguments of the called function. Unless 1019 // the index is out of bounds, which presumably means it's a variadic 1020 // function. 1021 if (!UseDABAttr) 1022 return Index; 1023 unsigned DABIndices = DABAttr->argIndices_size(); 1024 unsigned NewIndex = Index < DABIndices 1025 ? DABAttr->argIndices_begin()[Index] 1026 : Index - DABIndices + FD->getNumParams(); 1027 if (NewIndex >= TheCall->getNumArgs()) 1028 return llvm::None; 1029 return NewIndex; 1030 }; 1031 1032 auto ComputeExplicitObjectSizeArgument = 1033 [&](unsigned Index) -> Optional<llvm::APSInt> { 1034 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1035 if (!IndexOptional) 1036 return llvm::None; 1037 unsigned NewIndex = IndexOptional.getValue(); 1038 Expr::EvalResult Result; 1039 Expr *SizeArg = TheCall->getArg(NewIndex); 1040 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 1041 return llvm::None; 1042 llvm::APSInt Integer = Result.Val.getInt(); 1043 Integer.setIsUnsigned(true); 1044 return Integer; 1045 }; 1046 1047 auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1048 // If the parameter has a pass_object_size attribute, then we should use its 1049 // (potentially) more strict checking mode. Otherwise, conservatively assume 1050 // type 0. 1051 int BOSType = 0; 1052 // This check can fail for variadic functions. 
1053 if (Index < FD->getNumParams()) { 1054 if (const auto *POS = 1055 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>()) 1056 BOSType = POS->getType(); 1057 } 1058 1059 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1060 if (!IndexOptional) 1061 return llvm::None; 1062 unsigned NewIndex = IndexOptional.getValue(); 1063 1064 const Expr *ObjArg = TheCall->getArg(NewIndex); 1065 uint64_t Result; 1066 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 1067 return llvm::None; 1068 1069 // Get the object size in the target's size_t width. 1070 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 1071 }; 1072 1073 auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 1074 Optional<unsigned> IndexOptional = TranslateIndex(Index); 1075 if (!IndexOptional) 1076 return llvm::None; 1077 unsigned NewIndex = IndexOptional.getValue(); 1078 1079 const Expr *ObjArg = TheCall->getArg(NewIndex); 1080 uint64_t Result; 1081 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext())) 1082 return llvm::None; 1083 // Add 1 for null byte. 1084 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth); 1085 }; 1086 1087 Optional<llvm::APSInt> SourceSize; 1088 Optional<llvm::APSInt> DestinationSize; 1089 unsigned DiagID = 0; 1090 bool IsChkVariant = false; 1091 1092 auto GetFunctionName = [&]() { 1093 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 1094 // Skim off the details of whichever builtin was called to produce a better 1095 // diagnostic, as it's unlikely that the user wrote the __builtin 1096 // explicitly. 
1097 if (IsChkVariant) { 1098 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 1099 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 1100 } else if (FunctionName.startswith("__builtin_")) { 1101 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 1102 } 1103 return FunctionName; 1104 }; 1105 1106 switch (BuiltinID) { 1107 default: 1108 return; 1109 case Builtin::BI__builtin_strcpy: 1110 case Builtin::BIstrcpy: { 1111 DiagID = diag::warn_fortify_strlen_overflow; 1112 SourceSize = ComputeStrLenArgument(1); 1113 DestinationSize = ComputeSizeArgument(0); 1114 break; 1115 } 1116 1117 case Builtin::BI__builtin___strcpy_chk: { 1118 DiagID = diag::warn_fortify_strlen_overflow; 1119 SourceSize = ComputeStrLenArgument(1); 1120 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1121 IsChkVariant = true; 1122 break; 1123 } 1124 1125 case Builtin::BIscanf: 1126 case Builtin::BIfscanf: 1127 case Builtin::BIsscanf: { 1128 unsigned FormatIndex = 1; 1129 unsigned DataIndex = 2; 1130 if (BuiltinID == Builtin::BIscanf) { 1131 FormatIndex = 0; 1132 DataIndex = 1; 1133 } 1134 1135 const auto *FormatExpr = 1136 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1137 1138 const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 1139 if (!Format) 1140 return; 1141 1142 if (!Format->isAscii() && !Format->isUTF8()) 1143 return; 1144 1145 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize, 1146 unsigned SourceSize) { 1147 DiagID = diag::warn_fortify_scanf_overflow; 1148 unsigned Index = ArgIndex + DataIndex; 1149 StringRef FunctionName = GetFunctionName(); 1150 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall, 1151 PDiag(DiagID) << FunctionName << (Index + 1) 1152 << DestSize << SourceSize); 1153 }; 1154 1155 StringRef FormatStrRef = Format->getString(); 1156 auto ShiftedComputeSizeArgument = [&](unsigned Index) { 1157 return ComputeSizeArgument(Index + DataIndex); 1158 }; 1159 ScanfDiagnosticFormatHandler 
H(ShiftedComputeSizeArgument, Diagnose); 1160 const char *FormatBytes = FormatStrRef.data(); 1161 const ConstantArrayType *T = 1162 Context.getAsConstantArrayType(Format->getType()); 1163 assert(T && "String literal not of constant array type!"); 1164 size_t TypeSize = T->getSize().getZExtValue(); 1165 1166 // In case there's a null byte somewhere. 1167 size_t StrLen = 1168 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1169 1170 analyze_format_string::ParseScanfString(H, FormatBytes, 1171 FormatBytes + StrLen, getLangOpts(), 1172 Context.getTargetInfo()); 1173 1174 // Unlike the other cases, in this one we have already issued the diagnostic 1175 // here, so no need to continue (because unlike the other cases, here the 1176 // diagnostic refers to the argument number). 1177 return; 1178 } 1179 1180 case Builtin::BIsprintf: 1181 case Builtin::BI__builtin___sprintf_chk: { 1182 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3; 1183 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1184 1185 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 1186 1187 if (!Format->isAscii() && !Format->isUTF8()) 1188 return; 1189 1190 StringRef FormatStrRef = Format->getString(); 1191 EstimateSizeFormatHandler H(FormatStrRef); 1192 const char *FormatBytes = FormatStrRef.data(); 1193 const ConstantArrayType *T = 1194 Context.getAsConstantArrayType(Format->getType()); 1195 assert(T && "String literal not of constant array type!"); 1196 size_t TypeSize = T->getSize().getZExtValue(); 1197 1198 // In case there's a null byte somewhere. 
1199 size_t StrLen = 1200 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1201 if (!analyze_format_string::ParsePrintfString( 1202 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 1203 Context.getTargetInfo(), false)) { 1204 DiagID = diag::warn_fortify_source_format_overflow; 1205 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 1206 .extOrTrunc(SizeTypeWidth); 1207 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 1208 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1209 IsChkVariant = true; 1210 } else { 1211 DestinationSize = ComputeSizeArgument(0); 1212 } 1213 break; 1214 } 1215 } 1216 return; 1217 } 1218 case Builtin::BI__builtin___memcpy_chk: 1219 case Builtin::BI__builtin___memmove_chk: 1220 case Builtin::BI__builtin___memset_chk: 1221 case Builtin::BI__builtin___strlcat_chk: 1222 case Builtin::BI__builtin___strlcpy_chk: 1223 case Builtin::BI__builtin___strncat_chk: 1224 case Builtin::BI__builtin___strncpy_chk: 1225 case Builtin::BI__builtin___stpncpy_chk: 1226 case Builtin::BI__builtin___memccpy_chk: 1227 case Builtin::BI__builtin___mempcpy_chk: { 1228 DiagID = diag::warn_builtin_chk_overflow; 1229 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 1230 DestinationSize = 1231 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1232 IsChkVariant = true; 1233 break; 1234 } 1235 1236 case Builtin::BI__builtin___snprintf_chk: 1237 case Builtin::BI__builtin___vsnprintf_chk: { 1238 DiagID = diag::warn_builtin_chk_overflow; 1239 SourceSize = ComputeExplicitObjectSizeArgument(1); 1240 DestinationSize = ComputeExplicitObjectSizeArgument(3); 1241 IsChkVariant = true; 1242 break; 1243 } 1244 1245 case Builtin::BIstrncat: 1246 case Builtin::BI__builtin_strncat: 1247 case Builtin::BIstrncpy: 1248 case Builtin::BI__builtin_strncpy: 1249 case Builtin::BIstpncpy: 1250 case Builtin::BI__builtin_stpncpy: { 1251 // Whether these functions overflow depends on the runtime strlen of the 1252 // 
string, not just the buffer size, so emitting the "always overflow" 1253 // diagnostic isn't quite right. We should still diagnose passing a buffer 1254 // size larger than the destination buffer though; this is a runtime abort 1255 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 1256 DiagID = diag::warn_fortify_source_size_mismatch; 1257 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1258 DestinationSize = ComputeSizeArgument(0); 1259 break; 1260 } 1261 1262 case Builtin::BImemcpy: 1263 case Builtin::BI__builtin_memcpy: 1264 case Builtin::BImemmove: 1265 case Builtin::BI__builtin_memmove: 1266 case Builtin::BImemset: 1267 case Builtin::BI__builtin_memset: 1268 case Builtin::BImempcpy: 1269 case Builtin::BI__builtin_mempcpy: { 1270 DiagID = diag::warn_fortify_source_overflow; 1271 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1272 DestinationSize = ComputeSizeArgument(0); 1273 break; 1274 } 1275 case Builtin::BIsnprintf: 1276 case Builtin::BI__builtin_snprintf: 1277 case Builtin::BIvsnprintf: 1278 case Builtin::BI__builtin_vsnprintf: { 1279 DiagID = diag::warn_fortify_source_size_mismatch; 1280 SourceSize = ComputeExplicitObjectSizeArgument(1); 1281 DestinationSize = ComputeSizeArgument(0); 1282 break; 1283 } 1284 } 1285 1286 if (!SourceSize || !DestinationSize || 1287 llvm::APSInt::compareValues(SourceSize.getValue(), 1288 DestinationSize.getValue()) <= 0) 1289 return; 1290 1291 StringRef FunctionName = GetFunctionName(); 1292 1293 SmallString<16> DestinationStr; 1294 SmallString<16> SourceStr; 1295 DestinationSize->toString(DestinationStr, /*Radix=*/10); 1296 SourceSize->toString(SourceStr, /*Radix=*/10); 1297 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 1298 PDiag(DiagID) 1299 << FunctionName << DestinationStr << SourceStr); 1300 } 1301 1302 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 1303 Scope::ScopeFlags NeededScopeFlags, 1304 unsigned DiagID) { 1305 // 
Scopes aren't available during instantiation. Fortunately, builtin 1306 // functions cannot be template args so they cannot be formed through template 1307 // instantiation. Therefore checking once during the parse is sufficient. 1308 if (SemaRef.inTemplateInstantiation()) 1309 return false; 1310 1311 Scope *S = SemaRef.getCurScope(); 1312 while (S && !S->isSEHExceptScope()) 1313 S = S->getParent(); 1314 if (!S || !(S->getFlags() & NeededScopeFlags)) { 1315 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1316 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 1317 << DRE->getDecl()->getIdentifier(); 1318 return true; 1319 } 1320 1321 return false; 1322 } 1323 1324 static inline bool isBlockPointer(Expr *Arg) { 1325 return Arg->getType()->isBlockPointerType(); 1326 } 1327 1328 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 1329 /// void*, which is a requirement of device side enqueue. 1330 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 1331 const BlockPointerType *BPT = 1332 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1333 ArrayRef<QualType> Params = 1334 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 1335 unsigned ArgCounter = 0; 1336 bool IllegalParams = false; 1337 // Iterate through the block parameters until either one is found that is not 1338 // a local void*, or the block is valid. 1339 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 1340 I != E; ++I, ++ArgCounter) { 1341 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 1342 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 1343 LangAS::opencl_local) { 1344 // Get the location of the error. If a block literal has been passed 1345 // (BlockExpr) then we can point straight to the offending argument, 1346 // else we just point to the variable reference. 
1347 SourceLocation ErrorLoc; 1348 if (isa<BlockExpr>(BlockArg)) { 1349 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 1350 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 1351 } else if (isa<DeclRefExpr>(BlockArg)) { 1352 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 1353 } 1354 S.Diag(ErrorLoc, 1355 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 1356 IllegalParams = true; 1357 } 1358 } 1359 1360 return IllegalParams; 1361 } 1362 1363 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 1364 // OpenCL device can support extension but not the feature as extension 1365 // requires subgroup independent forward progress, but subgroup independent 1366 // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature. 1367 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) && 1368 !S.getOpenCLOptions().isSupported("__opencl_c_subgroups", 1369 S.getLangOpts())) { 1370 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 1371 << 1 << Call->getDirectCallee() 1372 << "cl_khr_subgroups or __opencl_c_subgroups"; 1373 return true; 1374 } 1375 return false; 1376 } 1377 1378 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 1379 if (checkArgCount(S, TheCall, 2)) 1380 return true; 1381 1382 if (checkOpenCLSubgroupExt(S, TheCall)) 1383 return true; 1384 1385 // First argument is an ndrange_t type. 
1386 Expr *NDRangeArg = TheCall->getArg(0); 1387 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1388 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1389 << TheCall->getDirectCallee() << "'ndrange_t'"; 1390 return true; 1391 } 1392 1393 Expr *BlockArg = TheCall->getArg(1); 1394 if (!isBlockPointer(BlockArg)) { 1395 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1396 << TheCall->getDirectCallee() << "block"; 1397 return true; 1398 } 1399 return checkOpenCLBlockArgs(S, BlockArg); 1400 } 1401 1402 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 1403 /// get_kernel_work_group_size 1404 /// and get_kernel_preferred_work_group_size_multiple builtin functions. 1405 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 1406 if (checkArgCount(S, TheCall, 1)) 1407 return true; 1408 1409 Expr *BlockArg = TheCall->getArg(0); 1410 if (!isBlockPointer(BlockArg)) { 1411 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1412 << TheCall->getDirectCallee() << "block"; 1413 return true; 1414 } 1415 return checkOpenCLBlockArgs(S, BlockArg); 1416 } 1417 1418 /// Diagnose integer type and any valid implicit conversion to it. 1419 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 1420 const QualType &IntType); 1421 1422 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 1423 unsigned Start, unsigned End) { 1424 bool IllegalParams = false; 1425 for (unsigned I = Start; I <= End; ++I) 1426 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 1427 S.Context.getSizeType()); 1428 return IllegalParams; 1429 } 1430 1431 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 1432 /// 'local void*' parameter of passed block. 
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    // NOTE(review): castAs<FunctionProtoType> asserts if the block has no
    // prototype — confirm an unprototyped block cannot reach this point.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  // NOTE(review): cast<DeclRefExpr> asserts if the pipe argument is not a
  // plain declaration reference — confirm no other expression form can
  // reach here with pipe type.
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    // NOTE(review): the second test looks redundant — unsigned integer types
    // are integer types, so !isIntegerType() already implies
    // !isUnsignedIntegerType().
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  // NOTE(review): same redundant unsigned-integer test as in
  // SemaBuiltinRWPipe above.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file , we used int
  // as return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  // The argument must be a pointer, and to_global/local/private may not be
  // applied to pointers into the constant address space.
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // Warn (do not error) when the pointee is not in the generic address space.
  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  // Rebuild the call's result type: same pointee, re-qualified with the
  // address space the builtin converts to.
  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

// Performs semantic analysis for __builtin_launder: checks the single
// argument, computes the (possibly decayed) parameter type, rejects
// non-pointer / function-pointer / void-pointer arguments, and requires the
// pointee to be complete.
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  // Select a diagnostic index for invalid parameter types; an empty Optional
  // means the type is acceptable.
  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Bind the argument to the computed parameter type as if it were an
  // ordinary function parameter.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current object format type is in the
// list of unsupported types.
static bool CheckBuiltinTargetNotInUnsupported(
    Sema &S, unsigned BuiltinID, CallExpr *TheCall,
    ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
  llvm::Triple::ObjectFormatType CurObjFormat =
      S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
        << TheCall->getSourceRange();
    return true;
  }
  return false;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                              ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

// Dispatch target-specific builtin checking to the per-architecture checker
// for the given target. Architectures without a checker are accepted as-is.
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

// Main entry point for semantic checking of calls to builtin functions.
// Validates arguments, may rewrite the call's type or arguments, and returns
// the (possibly replaced) call expression, or an invalid result on error.
// (Function continues on the following lines.)
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  // Each set bit in ICEArguments names an argument that must be an integer
  // constant expression.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue better
    // diagnostic in checkArgCount(...)
    // NOTE(review): 'return true' in this ExprResult-returning function
    // yields an invalid result (presumably equivalent to ExprError()) —
    // confirm against the ActionResult API.
    if (ArgNo < TheCall->getNumArgs() &&
        SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
    // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
    if (CheckBuiltinTargetNotInUnsupported(
            *this, BuiltinID, TheCall,
            {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
      return ExprError();
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    // Microsoft's __va_start has an ARM-specific lowering.
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetInSupported(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
                                      {llvm::Triple::x86_64, llvm::Triple::arm,
                                       llvm::Triple::thumb,
                                       llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
  case Builtin::BI__builtin_alloca_with_align_uninitialized:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    // Fall through to emit the -Walloca warning shared with plain alloca.
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_alloca:
  case Builtin::BI__builtin_alloca_uninitialized:
    Diag(TheCall->getBeginLoc(), diag::warn_alloca)
        << TheCall->getDirectCallee();
    break;
  case Builtin::BI__arithmetic_fence:
    if (SemaBuiltinArithmeticFence(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    // The 'type' argument must be a constant in [0, 3].
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_classify_type:
    // NOTE(review): 'return true' here produces an invalid ExprResult; see
    // note at the top of this function.
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_complex:
    if (SemaBuiltinComplex(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  // All of the __sync_* builtins are overloaded on their first (pointer)
  // argument; SemaBuiltinAtomicOverloaded resolves the overload and rewrites
  // the call.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
  case Builtin::BI__builtin_memcpy_inline: {
    clang::Expr *SizeOp = TheCall->getArg(2);
    // We warn about copying to or from `nullptr` pointers when `size` is
    // greater than 0. When `size` is value dependent we cannot evaluate its
    // value so we bail out.
    if (SizeOp->isValueDependent())
      break;
    if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
      CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
      CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
    }
    break;
  }
  case Builtin::BI__builtin_memset_inline: {
    clang::Expr *SizeOp = TheCall->getArg(2);
    // We warn about filling to `nullptr` pointers when `size` is greater than
    // 0. When `size` is value dependent we cannot evaluate its value so we
    // bail out.
    if (SizeOp->isValueDependent())
      break;
    if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
      CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
    break;
  }
// Expand a case label for every __c11/__atomic builtin listed in
// Builtins.def; each dispatches to SemaAtomicOpsOverloaded.
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS)                                        \
  case Builtin::BI##ID:                                                        \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_function_start:
    if (SemaBuiltinFunctionStart(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct:
    return SemaBuiltinDumpStruct(*this, TheCall);
  case Builtin::BI__builtin_expect_with_probability: {
    // We first want to ensure we are called with 3 arguments
    if (checkArgCount(*this, TheCall, 3))
      return ExprError();
    // then check probability is constant float in range [0.0, 1.0]
    const Expr *ProbArg = TheCall->getArg(2);
    SmallVector<PartialDiagnosticAt, 8> Notes;
    Expr::EvalResult Eval;
    Eval.Diag = &Notes;
    if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
        !Eval.Val.isFloat()) {
      Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
          << ProbArg->getSourceRange();
      for (const PartialDiagnosticAt &PDiag : Notes)
        Diag(PDiag.first, PDiag.second);
      return ExprError();
    }
    // Convert to double so the range comparison below is well-defined.
    llvm::APFloat Probability = Eval.Val.getFloat();
    bool LoseInfo = false;
    Probability.convert(llvm::APFloat::IEEEdouble(),
                        llvm::RoundingMode::Dynamic, &LoseInfo);
    if (!(Probability >= llvm::APFloat(0.0) &&
          Probability <= llvm::APFloat(1.0))) {
      Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
          << ProbArg->getSourceRange();
      return ExprError();
    }
    break;
  }
  case Builtin::BI__builtin_preserve_access_index:
    if (SemaBuiltinPreserveAI(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BIforward:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIas_const: {
    // These are all expected to be of the form
    //   T &/&&/* f(U &/&&)
    // where T and U only differ in qualification.
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();
    QualType Param = FDecl->getParamDecl(0)->getType();
    QualType Result = FDecl->getReturnType();
    bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
                          BuiltinID == Builtin::BI__addressof;
    if (!(Param->isReferenceType() &&
          (ReturnsPointer ? Result->isPointerType()
                          : Result->isReferenceType()) &&
          Context.hasSameUnqualifiedType(Param->getPointeeType(),
                                         Result->getPointeeType()))) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
          << FDecl;
      return ExprError();
    }
    break;
  }
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    // The sub_group variants additionally require the subgroups extension.
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
    Cleanup.setExprNeedsCleanups(true);
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_frame_address:
  case Builtin::BI__builtin_return_address: {
    if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
      return ExprError();

    // -Wframe-address warning if non-zero passed to builtin
    // return/frame address.
    Expr::EvalResult Result;
    if (!TheCall->getArg(0)->isValueDependent() &&
        TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
        Result.Val.getInt() != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
          << ((BuiltinID == Builtin::BI__builtin_return_address)
                  ? "__builtin_return_address"
                  : "__builtin_frame_address")
          << TheCall->getSourceRange();
    break;
  }

  // __builtin_elementwise_abs restricts the element type to signed integers or
  // floating point types only.
  case Builtin::BI__builtin_elementwise_abs: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (EltTy->isUnsignedIntegerType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* signed integer or float ty*/ 3 << ArgTy;
      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to floating point
  // types only.
  case Builtin::BI__builtin_elementwise_ceil:
  case Builtin::BI__builtin_elementwise_floor:
  case Builtin::BI__builtin_elementwise_roundeven:
  case Builtin::BI__builtin_elementwise_trunc: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (!EltTy->isFloatingType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* float ty*/ 5 << ArgTy;

      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to integer
  // types only.
  case Builtin::BI__builtin_elementwise_add_sat:
  case Builtin::BI__builtin_elementwise_sub_sat: {
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    QualType ArgTy = Arg->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();

    if (!EltTy->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* integer ty */ 6 << ArgTy;
      return ExprError();
    }
    break;
  }

  case Builtin::BI__builtin_elementwise_min:
  case Builtin::BI__builtin_elementwise_max:
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_reduce_max:
  case Builtin::BI__builtin_reduce_min: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    // The argument must be a vector; the reduction yields its element type.
    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector ty*/ 4 << Arg->getType();
      return ExprError();
    }

    TheCall->setType(TyA->getElementType());
    break;
  }

  // These builtins support vectors of integers only.
  // TODO: ADD/MUL should support floating-point types.
  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_xor:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_and: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA || !TyA->getElementType()->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector of integers */ 6 << Arg->getType();
      return ExprError();
    }
    TheCall->setType(TyA->getElementType());
    break;
  }

  case Builtin::BI__builtin_matrix_transpose:
    return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_load:
    return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_store:
    return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);

  case Builtin::BI__builtin_get_device_side_mangled_name: {
    // The single argument must name a function or variable carrying one of
    // the CUDA/HIP device-side attributes.
    auto Check = [](CallExpr *TheCall) {
      if (TheCall->getNumArgs() != 1)
        return false;
      auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
      if (!DRE)
        return false;
      auto *D = DRE->getDecl();
      if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
        return false;
      return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
             D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
    };
    if (!Check(TheCall)) {
      Diag(TheCall->getBeginLoc(),
           diag::err_hip_invalid_args_builtin_mangled_name);
      return ExprError();
    }
  }
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
      assert(Context.getAuxTargetInfo() &&
             "Aux Target Builtin, but not an aux target?");

      // The builtin belongs to the auxiliary target: translate the ID back to
      // that target's numbering before dispatching the target-specific check.
      if (CheckTSBuiltinFunctionCall(
              *Context.getAuxTargetInfo(),
              Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
        return ExprError();
    } else {
      if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
                                     TheCall))
        return ExprError();
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
// Returns the maximum lane index for the element type (quad vectors carry
// twice as many lanes), or the maximum shift amount when \p shift is set.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  // IsQuad doubles the lane count via the (N << IsQuad) computations below.
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  case NeonTypeFlags::BFloat16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // Whether a 64-bit element is 'long' or 'long long' depends on how the
    // target defines its int64_t type (IsInt64Long).
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No element pointer type for 128-bit poly; falls through to unreachable.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  case NeonTypeFlags::BFloat16:
    return Context.BFloat16Ty;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Range check SVE intrinsics that take immediate values.
  // Each tuple is (argument index, SVETypeFlags::ImmCheckType, element size in
  // bits), populated by the tablegen'd switch below.
  SmallVector<std::tuple<int,int,int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  // Perform all the immediate checks for this builtin call.
  // Check every immediate operand; keep going after a failure so all bad
  // immediates are diagnosed in one pass.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool(*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      // Extract index is bounded by the number of elements in a 2048-bit
      // (maximum-size) SVE vector.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      // Right shifts of 1..element-width are valid.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      // Left shifts of 0..element-width-1 are valid.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane indices are relative to a 128-bit vector segment.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      // Complex add only accepts rotations of 90 or 270 degrees.
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      // Complex multiply-accumulate accepts any multiple of 90 degrees.
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    }
  }

  return HasError;
}

bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  // 'mask' encodes the set of element-type codes this overloaded builtin
  // accepts; 'TV' is the type code taken from the trailing immediate argument.
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    // On AArch64 targets, poly types are unsigned.
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    // Diagnose the pointer argument as if it were being assigned to a
    // pointer of the expected element type.
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
2920 unsigned i = 0, l = 0, u = 0; 2921 switch (BuiltinID) { 2922 default: 2923 return false; 2924 #define GET_NEON_IMMEDIATE_CHECK 2925 #include "clang/Basic/arm_neon.inc" 2926 #include "clang/Basic/arm_fp16.inc" 2927 #undef GET_NEON_IMMEDIATE_CHECK 2928 } 2929 2930 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2931 } 2932 2933 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2934 switch (BuiltinID) { 2935 default: 2936 return false; 2937 #include "clang/Basic/arm_mve_builtin_sema.inc" 2938 } 2939 } 2940 2941 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2942 CallExpr *TheCall) { 2943 bool Err = false; 2944 switch (BuiltinID) { 2945 default: 2946 return false; 2947 #include "clang/Basic/arm_cde_builtin_sema.inc" 2948 } 2949 2950 if (Err) 2951 return true; 2952 2953 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2954 } 2955 2956 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2957 const Expr *CoprocArg, bool WantCDE) { 2958 if (isConstantEvaluated()) 2959 return false; 2960 2961 // We can't check the value of a dependent argument. 
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  // The argument is known to be an ICE by this point (checked earlier by the
  // per-builtin checks), so evaluating it here cannot fail.
  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  // Coprocessors 0..7 may be configured as CDE coprocessors; diagnose a
  // mismatch between the coprocessor's configuration and the builtin used.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointees cannot be accessed atomically with these builtins.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // The loads return a value of the pointee type; nothing more to check.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // 32-bit ARM exclusives are limited to 64-bit wide accesses.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  // Coprocessor intrinsics: the coprocessor number (argument 0) must be a
  // 4-bit immediate naming a non-CDE coprocessor.
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}

bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // AArch64 exclusives support up to 128-bit wide accesses.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. i = argument index, valid range is [l, u + l].
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

// Returns true if \p Arg is an acceptable first argument for
// __builtin_preserve_field_info.
static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
  if (Arg->getType()->getAsPlaceholderType())
    return false;

  // The first argument needs to be a record field access.
  // If it is an array element access, we delay decision
  // to BPF backend to check whether the access is a
  // field access or not.
  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
          isa<MemberExpr>(Arg->IgnoreParens()) ||
          isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}

// Returns true if \p Arg is an acceptable first argument for
// __builtin_preserve_type_info.
static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
  // format:
  //   1. __builtin_preserve_type_info(*(<type> *)0, flag);
  //   2. <type> var;
  //      __builtin_preserve_type_info(var, flag);
  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
      !isa<UnaryOperator>(Arg->IgnoreParens()))
    return false;

  // Typedef type.
  if (ArgType->getAs<TypedefType>())
    return true;

  // Record type or Enum type. Anonymous records/enums are rejected since
  // they have no name to relocate against.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  if (const auto *RT = Ty->getAs<RecordType>()) {
    if (!RT->getDecl()->getDeclName().isEmpty())
      return true;
  } else if (const auto *ET = Ty->getAs<EnumType>()) {
    if (!ET->getDecl()->getDeclName().isEmpty())
      return true;
  }

  return false;
}

// Returns true if \p Arg is an acceptable first argument for
// __builtin_preserve_enum_value.
static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
  //                                 flag);
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported.
  return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
}

bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id ||
          BuiltinID == BPF::BI__builtin_preserve_type_info ||
          BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
         "unexpected BPF builtin");

  if (checkArgCount(*this, TheCall, 2))
    return true;

  // The second argument needs to be a constant int
  Expr *Arg = TheCall->getArg(1);
  Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
  diag::kind kind;
  if (!Value) {
    // Pick the diagnostic that names the specific builtin.
    if (BuiltinID == BPF::BI__builtin_preserve_field_info)
      kind = diag::err_preserve_field_info_not_const;
    else if (BuiltinID == BPF::BI__builtin_btf_type_id)
      kind = diag::err_btf_type_id_not_const;
    else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
      kind = diag::err_preserve_type_info_not_const;
    else
      kind = diag::err_preserve_enum_value_not_const;
    Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
    return true;
  }

  // The first argument
  Arg = TheCall->getArg(0);
  bool InvalidArg = false;
  bool ReturnUnsignedInt = true;
  if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
    if (!isValidBPFPreserveFieldInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_field_info_not_field;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
    if (!isValidBPFPreserveTypeInfoArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_type_info_invalid;
    }
  } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
    if (!isValidBPFPreserveEnumValueArg(Arg)) {
      InvalidArg = true;
      kind = diag::err_preserve_enum_value_invalid;
    }
    ReturnUnsignedInt = false;
  } else if (BuiltinID ==
BPF::BI__builtin_btf_type_id) { 3373 ReturnUnsignedInt = false; 3374 } 3375 3376 if (InvalidArg) { 3377 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3378 return true; 3379 } 3380 3381 if (ReturnUnsignedInt) 3382 TheCall->setType(Context.UnsignedIntTy); 3383 else 3384 TheCall->setType(Context.UnsignedLongTy); 3385 return false; 3386 } 3387 3388 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3389 struct ArgInfo { 3390 uint8_t OpNum; 3391 bool IsSigned; 3392 uint8_t BitWidth; 3393 uint8_t Align; 3394 }; 3395 struct BuiltinInfo { 3396 unsigned BuiltinID; 3397 ArgInfo Infos[2]; 3398 }; 3399 3400 static BuiltinInfo Infos[] = { 3401 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3402 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3403 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3404 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3405 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3406 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3407 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3408 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3409 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3410 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3411 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3412 3413 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3414 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3415 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3416 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3417 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3418 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3419 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3420 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3421 { 
Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3422 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3423 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3424 3425 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3426 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3427 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3428 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3429 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3430 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3431 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3432 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3433 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3434 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3435 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3436 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3437 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3438 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3439 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3440 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3441 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3442 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3443 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3444 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3445 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3446 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3447 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3448 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3449 { 
Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3450 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3451 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3452 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3453 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3454 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3455 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3456 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3457 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3458 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3459 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3460 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3461 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3462 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3463 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3464 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3465 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3466 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3467 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3468 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3469 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3470 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3471 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3472 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3473 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3474 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3475 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, 
false, 6, 0 }} }, 3476 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3477 {{ 1, false, 6, 0 }} }, 3478 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3479 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3480 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3481 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3482 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3483 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3484 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3485 {{ 1, false, 5, 0 }} }, 3486 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3487 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3488 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3489 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3490 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3491 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3492 { 2, false, 5, 0 }} }, 3493 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3494 { 2, false, 6, 0 }} }, 3495 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3496 { 3, false, 5, 0 }} }, 3497 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3498 { 3, false, 6, 0 }} }, 3499 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3500 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3501 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3502 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3503 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3504 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3505 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3506 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 
2, false, 5, 0 }} }, 3507 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3508 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3509 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3510 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3511 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3512 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3513 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3514 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3515 {{ 2, false, 4, 0 }, 3516 { 3, false, 5, 0 }} }, 3517 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3518 {{ 2, false, 4, 0 }, 3519 { 3, false, 5, 0 }} }, 3520 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3521 {{ 2, false, 4, 0 }, 3522 { 3, false, 5, 0 }} }, 3523 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3524 {{ 2, false, 4, 0 }, 3525 { 3, false, 5, 0 }} }, 3526 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3527 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3528 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3529 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3530 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3531 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3532 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3533 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3534 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3535 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3536 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3537 { 2, false, 5, 0 }} }, 3538 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3539 { 2, false, 6, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 
0 }} }, 3541 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3543 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3548 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3549 {{ 1, false, 4, 0 }} }, 3550 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3551 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3552 {{ 1, false, 4, 0 }} }, 3553 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3556 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3558 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3560 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3562 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3563 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3565 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} 
}, 3569 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3571 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3572 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3573 {{ 3, false, 1, 0 }} }, 3574 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3577 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3578 {{ 3, false, 1, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3582 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3583 {{ 3, false, 1, 0 }} }, 3584 }; 3585 3586 // Use a dynamically initialized static to sort the table exactly once on 3587 // first run. 3588 static const bool SortOnce = 3589 (llvm::sort(Infos, 3590 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3591 return LHS.BuiltinID < RHS.BuiltinID; 3592 }), 3593 true); 3594 (void)SortOnce; 3595 3596 const BuiltinInfo *F = llvm::partition_point( 3597 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3598 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3599 return false; 3600 3601 bool Error = false; 3602 3603 for (const ArgInfo &A : F->Infos) { 3604 // Ignore empty ArgInfo elements. 3605 if (A.BitWidth == 0) 3606 continue; 3607 3608 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3609 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      // The encoded immediate is scaled by 2^Align in the instruction, so
      // widen the accepted range by that factor and additionally require the
      // argument to be a multiple of it.
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

// Entry point for Hexagon builtin semantic checking. All per-builtin
// validation currently consists of the immediate-argument checks performed
// by CheckHexagonBuiltinArgument; returns true on error.
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

// Entry point for MIPS builtin semantic checking: first verify that the
// required target feature (DSP / DSP-R2 / MSA) is enabled, then validate any
// constant arguments. Returns true if a diagnostic was emitted.
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

// Diagnose MIPS builtins used without the target feature that provides them.
// Each contiguous builtin-ID range below corresponds to one feature: DSP,
// DSP-R2, and MSA (the ranges rely on the declaration order in
// BuiltinsMips.def).
bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the immediate argument, [l, u] = permitted inclusive range,
  // m = required multiple (0 when there is no alignment constraint).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  // NOTE(review): ldi_b accepts [-128, 255], wider than a plain signed
  // 8-bit range — presumably so both signed and unsigned byte literals are
  // accepted; confirm against GCC behavior.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  // Memory-offset builtins also require the offset to be a multiple of the
  // element size (m).
  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': a 16 x unsigned char AltiVec vector.
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i<n>': a constant-integer 'int' argument; the constraint value <n>
    // is reported to the caller through Mask.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<n>': a PPC MMA vector type of <n> bits, optionally followed by
    // '*' (pointer) and/or 'C' (const) modifiers.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    // Consume any trailing '*'/'C' modifiers; the first unrecognized
    // character terminates the descriptor (and is pushed back).
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is a generic builtin type descriptor; defer to the
    // common decoder (note --Str re-exposes the character consumed above).
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}

static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  // Builtins listed here are rejected on 32-bit PPC targets; anything not
  // listed is allowed everywhere.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}

// If the target lacks FeatureToCheck, emit DiagID (with the optional DiagArg
// operand) at the call's location and return true; otherwise return false.
static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
                             StringRef FeatureToCheck, unsigned DiagID,
                             StringRef DiagArg = "") {
  if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
    return false;

  if (DiagArg.empty())
    S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
  else
    S.Diag(TheCall->getBeginLoc(), DiagID)
        << DiagArg << TheCall->getSourceRange();

  return true;
}

/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
/// since all 1s are not contiguous.
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  // (A wrapped run is one whose complement is a shifted mask.)
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

// Performs PPC-specific semantic checks on a builtin call: rejects 64-bit-only
// builtins on 32-bit targets, enforces required target features, and validates
// constant-immediate arguments. Returns true if a diagnostic was emitted.
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // NOTE(review): every case below returns directly and i/l/u are never
  // reassigned, so the trailing range check after the switch appears
  // vestigial; kept for parity with the other targets' checkers.
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm) ||
           (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
            SemaBuiltinConstantArgRange(TheCall, 2, 0, 31));
  case PPC::BI__builtin_tabort:
  case PPC::BI__builtin_tcheck:
  case PPC::BI__builtin_treclaim:
  case PPC::BI__builtin_trechkpt:
  case PPC::BI__builtin_tendall:
  case PPC::BI__builtin_tresume:
  case PPC::BI__builtin_tsuspend:
  case PPC::BI__builtin_get_texasr:
  case PPC::BI__builtin_get_texasru:
  case PPC::BI__builtin_get_tfhar:
  case PPC::BI__builtin_get_tfiar:
  case PPC::BI__builtin_set_texasr:
  case PPC::BI__builtin_set_texasru:
  case PPC::BI__builtin_set_tfhar:
  case PPC::BI__builtin_set_tfiar:
  case PPC::BI__builtin_ttest:
    return SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
  // __builtin_(un)pack_longdouble are available only if long double uses IBM
  // extended double representation.
  case PPC::BI__builtin_unpack_longdouble:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
      return true;
    LLVM_FALLTHROUGH;
  case PPC::BI__builtin_pack_longdouble:
    if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
      return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
             << "ibmlongdouble";
    return false;
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_divwe:
  case PPC::BI__builtin_divweu:
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
    return SemaFeatureCheck(*this, TheCall, "extdiv",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_bpermd:
    return SemaFeatureCheck(*this, TheCall, "bpermd",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_pack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
    return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions",
                            diag::err_ppc_builtin_only_on_arch, "10");
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_cmprb:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
  case PPC::BI__builtin_ppc_rlwnm:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_rlwimi:
  case PPC::BI__builtin_ppc_rldimi:
    return SemaBuiltinConstantArg(TheCall, 2, Result) ||
           SemaValueIsRunOfOnes(TheCall, 3);
  case PPC::BI__builtin_ppc_extract_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_insert_exp:
    return SemaFeatureCheck(*this, TheCall, "power9-vector",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_addex: {
    if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                         diag::err_ppc_builtin_only_on_arch, "9") ||
        SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
      return true;
    // Output warning for reserved values 1 to 3.
    // (The range check above guarantees the argument is a constant in
    // [0, 3], so the Optional below is populated.)
    int ArgValue =
        TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
    if (ArgValue != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
          << ArgValue;
    return false;
  }
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_icbt:
  case PPC::BI__builtin_ppc_sthcx:
  case PPC::BI__builtin_ppc_stbcx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8");
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case PPC::BI__builtin_altivec_vcntmbb:
  case PPC::BI__builtin_altivec_vcntmbh:
  case PPC::BI__builtin_altivec_vcntmbw:
  case PPC::BI__builtin_altivec_vcntmbd:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_darn_32:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_vsx_xxgenpcvbm:
  case PPC::BI__builtin_vsx_xxgenpcvhm:
  case PPC::BI__builtin_vsx_xxgenpcvwm:
  case PPC::BI__builtin_vsx_xxgenpcvdm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case PPC::BI__builtin_ppc_compare_exp_uo:
  case PPC::BI__builtin_ppc_compare_exp_lt:
  case PPC::BI__builtin_ppc_compare_exp_gt:
  case PPC::BI__builtin_ppc_compare_exp_eq:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx);
  case PPC::BI__builtin_ppc_test_data_class: {
    // Check if the first argument of the __builtin_ppc_test_data_class call is
    // valid. The argument must be either a 'float' or a 'double'.
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_maxfe:
  case PPC::BI__builtin_ppc_minfe:
  case PPC::BI__builtin_ppc_maxfl:
  case PPC::BI__builtin_ppc_minfl:
  case PPC::BI__builtin_ppc_maxfs:
  case PPC::BI__builtin_ppc_minfs: {
    // The 'fe' variants operate on 128-bit long double, which AIX does not
    // support; reject them there.
    if (Context.getTargetInfo().getTriple().isOSAIX() &&
        (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
         BuiltinID == PPC::BI__builtin_ppc_minfe))
      return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
             << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
             << false << Context.getTargetInfo().getTriple().str();
    // Argument type should be exact.
    QualType ArgType = QualType(Context.LongDoubleTy);
    if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
        BuiltinID == PPC::BI__builtin_ppc_minfl)
      ArgType = QualType(Context.DoubleTy);
    else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
             BuiltinID == PPC::BI__builtin_ppc_minfs)
      ArgType = QualType(Context.FloatTy);
    for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
      if (TheCall->getArg(I)->getType() != ArgType)
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
    return false;
  }
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
    return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
                            diag::err_ppc_builtin_only_on_arch, "7");
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
4224 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 4225 if (Type->isPointerType() || Type->isArrayType()) 4226 return false; 4227 4228 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 4229 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 4230 if (false 4231 #include "clang/Basic/PPCTypes.def" 4232 ) { 4233 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 4234 return true; 4235 } 4236 return false; 4237 } 4238 4239 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 4240 CallExpr *TheCall) { 4241 // position of memory order and scope arguments in the builtin 4242 unsigned OrderIndex, ScopeIndex; 4243 switch (BuiltinID) { 4244 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 4245 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 4246 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 4247 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 4248 OrderIndex = 2; 4249 ScopeIndex = 3; 4250 break; 4251 case AMDGPU::BI__builtin_amdgcn_fence: 4252 OrderIndex = 0; 4253 ScopeIndex = 1; 4254 break; 4255 default: 4256 return false; 4257 } 4258 4259 ExprResult Arg = TheCall->getArg(OrderIndex); 4260 auto ArgExpr = Arg.get(); 4261 Expr::EvalResult ArgResult; 4262 4263 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 4264 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 4265 << ArgExpr->getType(); 4266 auto Ord = ArgResult.Val.getInt().getZExtValue(); 4267 4268 // Check validity of memory ordering as per C11 / C++11's memody model. 4269 // Only fence needs check. Atomic dec/inc allow all memory orders. 
4270 if (!llvm::isValidAtomicOrderingCABI(Ord)) 4271 return Diag(ArgExpr->getBeginLoc(), 4272 diag::warn_atomic_op_has_invalid_memory_order) 4273 << ArgExpr->getSourceRange(); 4274 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 4275 case llvm::AtomicOrderingCABI::relaxed: 4276 case llvm::AtomicOrderingCABI::consume: 4277 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 4278 return Diag(ArgExpr->getBeginLoc(), 4279 diag::warn_atomic_op_has_invalid_memory_order) 4280 << ArgExpr->getSourceRange(); 4281 break; 4282 case llvm::AtomicOrderingCABI::acquire: 4283 case llvm::AtomicOrderingCABI::release: 4284 case llvm::AtomicOrderingCABI::acq_rel: 4285 case llvm::AtomicOrderingCABI::seq_cst: 4286 break; 4287 } 4288 4289 Arg = TheCall->getArg(ScopeIndex); 4290 ArgExpr = Arg.get(); 4291 Expr::EvalResult ArgResult1; 4292 // Check that sync scope is a constant literal 4293 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 4294 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 4295 << ArgExpr->getType(); 4296 4297 return false; 4298 } 4299 4300 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 4301 llvm::APSInt Result; 4302 4303 // We can't check the value of a dependent argument. 4304 Expr *Arg = TheCall->getArg(ArgNum); 4305 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4306 return false; 4307 4308 // Check constant-ness first. 4309 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4310 return true; 4311 4312 int64_t Val = Result.getSExtValue(); 4313 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 4314 return false; 4315 4316 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 4317 << Arg->getSourceRange(); 4318 } 4319 4320 static bool isRISCV32Builtin(unsigned BuiltinID) { 4321 // These builtins only work on riscv32 targets. 
4322 switch (BuiltinID) { 4323 case RISCV::BI__builtin_riscv_zip_32: 4324 case RISCV::BI__builtin_riscv_unzip_32: 4325 case RISCV::BI__builtin_riscv_aes32dsi_32: 4326 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4327 case RISCV::BI__builtin_riscv_aes32esi_32: 4328 case RISCV::BI__builtin_riscv_aes32esmi_32: 4329 case RISCV::BI__builtin_riscv_sha512sig0h_32: 4330 case RISCV::BI__builtin_riscv_sha512sig0l_32: 4331 case RISCV::BI__builtin_riscv_sha512sig1h_32: 4332 case RISCV::BI__builtin_riscv_sha512sig1l_32: 4333 case RISCV::BI__builtin_riscv_sha512sum0r_32: 4334 case RISCV::BI__builtin_riscv_sha512sum1r_32: 4335 return true; 4336 } 4337 4338 return false; 4339 } 4340 4341 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 4342 unsigned BuiltinID, 4343 CallExpr *TheCall) { 4344 // CodeGenFunction can also detect this, but this gives a better error 4345 // message. 4346 bool FeatureMissing = false; 4347 SmallVector<StringRef> ReqFeatures; 4348 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 4349 Features.split(ReqFeatures, ','); 4350 4351 // Check for 32-bit only builtins on a 64-bit target. 4352 const llvm::Triple &TT = TI.getTriple(); 4353 if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID)) 4354 return Diag(TheCall->getCallee()->getBeginLoc(), 4355 diag::err_32_bit_builtin_64_bit_tgt); 4356 4357 // Check if each required feature is included 4358 for (StringRef F : ReqFeatures) { 4359 SmallVector<StringRef> ReqOpFeatures; 4360 F.split(ReqOpFeatures, '|'); 4361 bool HasFeature = false; 4362 for (StringRef OF : ReqOpFeatures) { 4363 if (TI.hasFeature(OF)) { 4364 HasFeature = true; 4365 continue; 4366 } 4367 } 4368 4369 if (!HasFeature) { 4370 std::string FeatureStrs; 4371 for (StringRef OF : ReqOpFeatures) { 4372 // If the feature is 64bit, alter the string so it will print better in 4373 // the diagnostic. 
4374 if (OF == "64bit") 4375 OF = "RV64"; 4376 4377 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 4378 OF.consume_front("experimental-"); 4379 std::string FeatureStr = OF.str(); 4380 FeatureStr[0] = std::toupper(FeatureStr[0]); 4381 // Combine strings. 4382 FeatureStrs += FeatureStrs == "" ? "" : ", "; 4383 FeatureStrs += "'"; 4384 FeatureStrs += FeatureStr; 4385 FeatureStrs += "'"; 4386 } 4387 // Error message 4388 FeatureMissing = true; 4389 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4390 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4391 } 4392 } 4393 4394 if (FeatureMissing) 4395 return true; 4396 4397 switch (BuiltinID) { 4398 case RISCVVector::BI__builtin_rvv_vsetvli: 4399 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4400 CheckRISCVLMUL(TheCall, 2); 4401 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4402 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4403 CheckRISCVLMUL(TheCall, 1); 4404 case RISCVVector::BI__builtin_rvv_vget_v: { 4405 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4406 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4407 TheCall->getType().getCanonicalType().getTypePtr())); 4408 ASTContext::BuiltinVectorTypeInfo VecInfo = 4409 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4410 TheCall->getArg(0)->getType().getCanonicalType().getTypePtr())); 4411 unsigned MaxIndex = 4412 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) / 4413 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors); 4414 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4415 } 4416 case RISCVVector::BI__builtin_rvv_vset_v: { 4417 ASTContext::BuiltinVectorTypeInfo ResVecInfo = 4418 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4419 TheCall->getType().getCanonicalType().getTypePtr())); 4420 ASTContext::BuiltinVectorTypeInfo VecInfo = 4421 Context.getBuiltinVectorTypeInfo(cast<BuiltinType>( 4422 TheCall->getArg(2)->getType().getCanonicalType().getTypePtr())); 4423 
unsigned MaxIndex = 4424 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) / 4425 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors); 4426 return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1); 4427 } 4428 // Check if byteselect is in [0, 3] 4429 case RISCV::BI__builtin_riscv_aes32dsi_32: 4430 case RISCV::BI__builtin_riscv_aes32dsmi_32: 4431 case RISCV::BI__builtin_riscv_aes32esi_32: 4432 case RISCV::BI__builtin_riscv_aes32esmi_32: 4433 case RISCV::BI__builtin_riscv_sm4ks: 4434 case RISCV::BI__builtin_riscv_sm4ed: 4435 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4436 // Check if rnum is in [0, 10] 4437 case RISCV::BI__builtin_riscv_aes64ks1i_64: 4438 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10); 4439 } 4440 4441 return false; 4442 } 4443 4444 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4445 CallExpr *TheCall) { 4446 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4447 Expr *Arg = TheCall->getArg(0); 4448 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4449 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4450 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4451 << Arg->getSourceRange(); 4452 } 4453 4454 // For intrinsics which take an immediate value as part of the instruction, 4455 // range check them here. 
4456 unsigned i = 0, l = 0, u = 0; 4457 switch (BuiltinID) { 4458 default: return false; 4459 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4460 case SystemZ::BI__builtin_s390_verimb: 4461 case SystemZ::BI__builtin_s390_verimh: 4462 case SystemZ::BI__builtin_s390_verimf: 4463 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4464 case SystemZ::BI__builtin_s390_vfaeb: 4465 case SystemZ::BI__builtin_s390_vfaeh: 4466 case SystemZ::BI__builtin_s390_vfaef: 4467 case SystemZ::BI__builtin_s390_vfaebs: 4468 case SystemZ::BI__builtin_s390_vfaehs: 4469 case SystemZ::BI__builtin_s390_vfaefs: 4470 case SystemZ::BI__builtin_s390_vfaezb: 4471 case SystemZ::BI__builtin_s390_vfaezh: 4472 case SystemZ::BI__builtin_s390_vfaezf: 4473 case SystemZ::BI__builtin_s390_vfaezbs: 4474 case SystemZ::BI__builtin_s390_vfaezhs: 4475 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4476 case SystemZ::BI__builtin_s390_vfisb: 4477 case SystemZ::BI__builtin_s390_vfidb: 4478 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4479 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4480 case SystemZ::BI__builtin_s390_vftcisb: 4481 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4482 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4483 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4484 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4485 case SystemZ::BI__builtin_s390_vstrcb: 4486 case SystemZ::BI__builtin_s390_vstrch: 4487 case SystemZ::BI__builtin_s390_vstrcf: 4488 case SystemZ::BI__builtin_s390_vstrczb: 4489 case SystemZ::BI__builtin_s390_vstrczh: 4490 case SystemZ::BI__builtin_s390_vstrczf: 4491 case SystemZ::BI__builtin_s390_vstrcbs: 4492 case SystemZ::BI__builtin_s390_vstrchs: 4493 case SystemZ::BI__builtin_s390_vstrcfs: 4494 case SystemZ::BI__builtin_s390_vstrczbs: 4495 case SystemZ::BI__builtin_s390_vstrczhs: 4496 case SystemZ::BI__builtin_s390_vstrczfs: 
i = 3; l = 0; u = 15; break; 4497 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4498 case SystemZ::BI__builtin_s390_vfminsb: 4499 case SystemZ::BI__builtin_s390_vfmaxsb: 4500 case SystemZ::BI__builtin_s390_vfmindb: 4501 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4502 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4503 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4504 case SystemZ::BI__builtin_s390_vclfnhs: 4505 case SystemZ::BI__builtin_s390_vclfnls: 4506 case SystemZ::BI__builtin_s390_vcfn: 4507 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4508 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4509 } 4510 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4511 } 4512 4513 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4514 /// This checks that the target supports __builtin_cpu_supports and 4515 /// that the string argument is constant and valid. 4516 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4517 CallExpr *TheCall) { 4518 Expr *Arg = TheCall->getArg(0); 4519 4520 // Check if the argument is a string literal. 4521 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4522 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4523 << Arg->getSourceRange(); 4524 4525 // Check the contents of the string. 4526 StringRef Feature = 4527 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4528 if (!TI.validateCpuSupports(Feature)) 4529 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4530 << Arg->getSourceRange(); 4531 return false; 4532 } 4533 4534 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4535 /// This checks that the target supports __builtin_cpu_is and 4536 /// that the string argument is constant and valid. 
static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuIs(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
           << Arg->getSourceRange();
  return false;
}

// Check if the rounding mode is legal.
// The switch maps each builtin to the position (ArgNum) of its rounding/SAE
// immediate and records whether the builtin has rounding control (HasRC) or
// suppress-all-exceptions only. Returns true on error.
bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
  // Indicates if this instruction has rounding control or just SAE.
  bool HasRC = false;

  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vcvttsd2si32:
  case X86::BI__builtin_ia32_vcvttsd2si64:
  case X86::BI__builtin_ia32_vcvttsd2usi32:
  case X86::BI__builtin_ia32_vcvttsd2usi64:
  case X86::BI__builtin_ia32_vcvttss2si32:
  case X86::BI__builtin_ia32_vcvttss2si64:
  case X86::BI__builtin_ia32_vcvttss2usi32:
  case X86::BI__builtin_ia32_vcvttss2usi64:
  case X86::BI__builtin_ia32_vcvttsh2si32:
  case X86::BI__builtin_ia32_vcvttsh2si64:
  case X86::BI__builtin_ia32_vcvttsh2usi32:
  case X86::BI__builtin_ia32_vcvttsh2usi64:
    ArgNum = 1;
    break;
  case X86::BI__builtin_ia32_maxpd512:
  case X86::BI__builtin_ia32_maxps512:
  case X86::BI__builtin_ia32_minpd512:
  case X86::BI__builtin_ia32_minps512:
  case X86::BI__builtin_ia32_maxph512:
  case X86::BI__builtin_ia32_minph512:
    ArgNum = 2;
    break;
  case X86::BI__builtin_ia32_vcvtph2pd512_mask:
  case X86::BI__builtin_ia32_vcvtph2psx512_mask:
  case X86::BI__builtin_ia32_cvtps2pd512_mask:
  case X86::BI__builtin_ia32_cvttpd2dq512_mask:
  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvttps2dq512_mask:
  case X86::BI__builtin_ia32_cvttps2qq512_mask:
  case X86::BI__builtin_ia32_cvttps2udq512_mask:
  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
  case X86::BI__builtin_ia32_vcvttph2w512_mask:
  case X86::BI__builtin_ia32_vcvttph2uw512_mask:
  case X86::BI__builtin_ia32_vcvttph2dq512_mask:
  case X86::BI__builtin_ia32_vcvttph2udq512_mask:
  case X86::BI__builtin_ia32_vcvttph2qq512_mask:
  case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
  case X86::BI__builtin_ia32_exp2pd_mask:
  case X86::BI__builtin_ia32_exp2ps_mask:
  case X86::BI__builtin_ia32_getexppd512_mask:
  case X86::BI__builtin_ia32_getexpps512_mask:
  case X86::BI__builtin_ia32_getexpph512_mask:
  case X86::BI__builtin_ia32_rcp28pd_mask:
  case X86::BI__builtin_ia32_rcp28ps_mask:
  case X86::BI__builtin_ia32_rsqrt28pd_mask:
  case X86::BI__builtin_ia32_rsqrt28ps_mask:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_vcomish:
  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_cmpsh_mask:
  case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
  case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
  case X86::BI__builtin_ia32_getexpsd128_round_mask:
  case X86::BI__builtin_ia32_getexpss128_round_mask:
  case X86::BI__builtin_ia32_getexpsh128_round_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_maxsd_round_mask:
  case X86::BI__builtin_ia32_maxss_round_mask:
  case X86::BI__builtin_ia32_maxsh_round_mask:
  case X86::BI__builtin_ia32_minsd_round_mask:
  case X86::BI__builtin_ia32_minss_round_mask:
  case X86::BI__builtin_ia32_minsh_round_mask:
  case X86::BI__builtin_ia32_rcp28sd_round_mask:
  case X86::BI__builtin_ia32_rcp28ss_round_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
    ArgNum = 4;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_rangesd128_round_mask:
  case X86::BI__builtin_ia32_rangess128_round_mask:
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
    ArgNum = 5;
    break;
  // The builtins below additionally accept embedded rounding control.
  case X86::BI__builtin_ia32_vcvtsd2si64:
  case X86::BI__builtin_ia32_vcvtsd2si32:
  case X86::BI__builtin_ia32_vcvtsd2usi32:
  case X86::BI__builtin_ia32_vcvtsd2usi64:
  case X86::BI__builtin_ia32_vcvtss2si32:
  case X86::BI__builtin_ia32_vcvtss2si64:
  case X86::BI__builtin_ia32_vcvtss2usi32:
  case X86::BI__builtin_ia32_vcvtss2usi64:
  case X86::BI__builtin_ia32_vcvtsh2si32:
  case X86::BI__builtin_ia32_vcvtsh2si64:
  case X86::BI__builtin_ia32_vcvtsh2usi32:
  case X86::BI__builtin_ia32_vcvtsh2usi64:
  case X86::BI__builtin_ia32_sqrtpd512:
  case X86::BI__builtin_ia32_sqrtps512:
  case X86::BI__builtin_ia32_sqrtph512:
    ArgNum = 1;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addph512:
  case X86::BI__builtin_ia32_divph512:
  case X86::BI__builtin_ia32_mulph512:
  case X86::BI__builtin_ia32_subph512:
  case X86::BI__builtin_ia32_addpd512:
  case X86::BI__builtin_ia32_addps512:
  case X86::BI__builtin_ia32_divpd512:
  case X86::BI__builtin_ia32_divps512:
  case X86::BI__builtin_ia32_mulpd512:
  case X86::BI__builtin_ia32_mulps512:
  case X86::BI__builtin_ia32_subpd512:
  case X86::BI__builtin_ia32_subps512:
  case X86::BI__builtin_ia32_cvtsi2sd64:
  case X86::BI__builtin_ia32_cvtsi2ss32:
  case X86::BI__builtin_ia32_cvtsi2ss64:
  case X86::BI__builtin_ia32_cvtusi2sd64:
  case X86::BI__builtin_ia32_cvtusi2ss32:
  case X86::BI__builtin_ia32_cvtusi2ss64:
  case X86::BI__builtin_ia32_vcvtusi2sh:
  case X86::BI__builtin_ia32_vcvtusi642sh:
  case X86::BI__builtin_ia32_vcvtsi2sh:
  case X86::BI__builtin_ia32_vcvtsi642sh:
    ArgNum = 2;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
  case X86::BI__builtin_ia32_vcvtps2phx512_mask:
  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvtps2dq512_mask:
  case X86::BI__builtin_ia32_cvtps2qq512_mask:
  case X86::BI__builtin_ia32_cvtps2udq512_mask:
  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtph2w512_mask:
  case X86::BI__builtin_ia32_vcvtph2uw512_mask:
  case X86::BI__builtin_ia32_vcvtph2dq512_mask:
  case X86::BI__builtin_ia32_vcvtph2udq512_mask:
  case X86::BI__builtin_ia32_vcvtph2qq512_mask:
  case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
    ArgNum = 3;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addsh_round_mask:
  case X86::BI__builtin_ia32_addss_round_mask:
  case X86::BI__builtin_ia32_addsd_round_mask:
  case X86::BI__builtin_ia32_divsh_round_mask:
  case X86::BI__builtin_ia32_divss_round_mask:
  case X86::BI__builtin_ia32_divsd_round_mask:
  case X86::BI__builtin_ia32_mulsh_round_mask:
  case X86::BI__builtin_ia32_mulss_round_mask:
  case X86::BI__builtin_ia32_mulsd_round_mask:
  case X86::BI__builtin_ia32_subsh_round_mask:
  case X86::BI__builtin_ia32_subss_round_mask:
  case X86::BI__builtin_ia32_subsd_round_mask:
  case X86::BI__builtin_ia32_scalefph512_mask:
  case X86::BI__builtin_ia32_scalefpd512_mask:
  case X86::BI__builtin_ia32_scalefps512_mask:
  case X86::BI__builtin_ia32_scalefsd_round_mask:
  case X86::BI__builtin_ia32_scalefss_round_mask:
  case X86::BI__builtin_ia32_scalefsh_round_mask:
  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
  case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
  case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_sqrtsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsh3_mask:
  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddph512_mask:
  case X86::BI__builtin_ia32_vfmaddph512_maskz:
  case X86::BI__builtin_ia32_vfmaddph512_mask3:
  case X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
  // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
  // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
  // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
// The switch records the position (ArgNum) of the scale immediate for each
// gather/scatter builtin; the scale must be 1, 2, 4, or 8.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Only power-of-two scales up to 8 are encodable.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

// Valid AMX tile register numbers: tmm0..tmm7.
enum { TileRegLow = 0, TileRegHigh = 7 };

// Range-checks each listed argument as a tile register number in
// [TileRegLow, TileRegHigh]. Returns true on error.
bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

// Diagnoses the same tile register being used for more than one of the listed
// arguments. Returns true on error.
bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // Because the max number of tile register is TileRegHigh + 1, so here we use
  // each bit to represent the usage of them in bitset.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    // Dependent arguments can't be evaluated yet; skip them.
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    // NOTE(review): this assert uses '||', which is vacuously true for any
    // value; '&&' appears to have been intended — confirm before relying on it.
    assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

// Combined range + duplicate check for tile register arguments.
bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

// Dispatches the appropriate tile-argument checks for each AMX builtin.
bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure its valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure its valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
5051 int i = 0, l = 0, u = 0; 5052 switch (BuiltinID) { 5053 default: 5054 return false; 5055 case X86::BI__builtin_ia32_vec_ext_v2si: 5056 case X86::BI__builtin_ia32_vec_ext_v2di: 5057 case X86::BI__builtin_ia32_vextractf128_pd256: 5058 case X86::BI__builtin_ia32_vextractf128_ps256: 5059 case X86::BI__builtin_ia32_vextractf128_si256: 5060 case X86::BI__builtin_ia32_extract128i256: 5061 case X86::BI__builtin_ia32_extractf64x4_mask: 5062 case X86::BI__builtin_ia32_extracti64x4_mask: 5063 case X86::BI__builtin_ia32_extractf32x8_mask: 5064 case X86::BI__builtin_ia32_extracti32x8_mask: 5065 case X86::BI__builtin_ia32_extractf64x2_256_mask: 5066 case X86::BI__builtin_ia32_extracti64x2_256_mask: 5067 case X86::BI__builtin_ia32_extractf32x4_256_mask: 5068 case X86::BI__builtin_ia32_extracti32x4_256_mask: 5069 i = 1; l = 0; u = 1; 5070 break; 5071 case X86::BI__builtin_ia32_vec_set_v2di: 5072 case X86::BI__builtin_ia32_vinsertf128_pd256: 5073 case X86::BI__builtin_ia32_vinsertf128_ps256: 5074 case X86::BI__builtin_ia32_vinsertf128_si256: 5075 case X86::BI__builtin_ia32_insert128i256: 5076 case X86::BI__builtin_ia32_insertf32x8: 5077 case X86::BI__builtin_ia32_inserti32x8: 5078 case X86::BI__builtin_ia32_insertf64x4: 5079 case X86::BI__builtin_ia32_inserti64x4: 5080 case X86::BI__builtin_ia32_insertf64x2_256: 5081 case X86::BI__builtin_ia32_inserti64x2_256: 5082 case X86::BI__builtin_ia32_insertf32x4_256: 5083 case X86::BI__builtin_ia32_inserti32x4_256: 5084 i = 2; l = 0; u = 1; 5085 break; 5086 case X86::BI__builtin_ia32_vpermilpd: 5087 case X86::BI__builtin_ia32_vec_ext_v4hi: 5088 case X86::BI__builtin_ia32_vec_ext_v4si: 5089 case X86::BI__builtin_ia32_vec_ext_v4sf: 5090 case X86::BI__builtin_ia32_vec_ext_v4di: 5091 case X86::BI__builtin_ia32_extractf32x4_mask: 5092 case X86::BI__builtin_ia32_extracti32x4_mask: 5093 case X86::BI__builtin_ia32_extractf64x2_512_mask: 5094 case X86::BI__builtin_ia32_extracti64x2_512_mask: 5095 i = 1; l = 0; u = 3; 5096 break; 5097 case 
X86::BI_mm_prefetch: 5098 case X86::BI__builtin_ia32_vec_ext_v8hi: 5099 case X86::BI__builtin_ia32_vec_ext_v8si: 5100 i = 1; l = 0; u = 7; 5101 break; 5102 case X86::BI__builtin_ia32_sha1rnds4: 5103 case X86::BI__builtin_ia32_blendpd: 5104 case X86::BI__builtin_ia32_shufpd: 5105 case X86::BI__builtin_ia32_vec_set_v4hi: 5106 case X86::BI__builtin_ia32_vec_set_v4si: 5107 case X86::BI__builtin_ia32_vec_set_v4di: 5108 case X86::BI__builtin_ia32_shuf_f32x4_256: 5109 case X86::BI__builtin_ia32_shuf_f64x2_256: 5110 case X86::BI__builtin_ia32_shuf_i32x4_256: 5111 case X86::BI__builtin_ia32_shuf_i64x2_256: 5112 case X86::BI__builtin_ia32_insertf64x2_512: 5113 case X86::BI__builtin_ia32_inserti64x2_512: 5114 case X86::BI__builtin_ia32_insertf32x4: 5115 case X86::BI__builtin_ia32_inserti32x4: 5116 i = 2; l = 0; u = 3; 5117 break; 5118 case X86::BI__builtin_ia32_vpermil2pd: 5119 case X86::BI__builtin_ia32_vpermil2pd256: 5120 case X86::BI__builtin_ia32_vpermil2ps: 5121 case X86::BI__builtin_ia32_vpermil2ps256: 5122 i = 3; l = 0; u = 3; 5123 break; 5124 case X86::BI__builtin_ia32_cmpb128_mask: 5125 case X86::BI__builtin_ia32_cmpw128_mask: 5126 case X86::BI__builtin_ia32_cmpd128_mask: 5127 case X86::BI__builtin_ia32_cmpq128_mask: 5128 case X86::BI__builtin_ia32_cmpb256_mask: 5129 case X86::BI__builtin_ia32_cmpw256_mask: 5130 case X86::BI__builtin_ia32_cmpd256_mask: 5131 case X86::BI__builtin_ia32_cmpq256_mask: 5132 case X86::BI__builtin_ia32_cmpb512_mask: 5133 case X86::BI__builtin_ia32_cmpw512_mask: 5134 case X86::BI__builtin_ia32_cmpd512_mask: 5135 case X86::BI__builtin_ia32_cmpq512_mask: 5136 case X86::BI__builtin_ia32_ucmpb128_mask: 5137 case X86::BI__builtin_ia32_ucmpw128_mask: 5138 case X86::BI__builtin_ia32_ucmpd128_mask: 5139 case X86::BI__builtin_ia32_ucmpq128_mask: 5140 case X86::BI__builtin_ia32_ucmpb256_mask: 5141 case X86::BI__builtin_ia32_ucmpw256_mask: 5142 case X86::BI__builtin_ia32_ucmpd256_mask: 5143 case X86::BI__builtin_ia32_ucmpq256_mask: 5144 case 
X86::BI__builtin_ia32_ucmpb512_mask: 5145 case X86::BI__builtin_ia32_ucmpw512_mask: 5146 case X86::BI__builtin_ia32_ucmpd512_mask: 5147 case X86::BI__builtin_ia32_ucmpq512_mask: 5148 case X86::BI__builtin_ia32_vpcomub: 5149 case X86::BI__builtin_ia32_vpcomuw: 5150 case X86::BI__builtin_ia32_vpcomud: 5151 case X86::BI__builtin_ia32_vpcomuq: 5152 case X86::BI__builtin_ia32_vpcomb: 5153 case X86::BI__builtin_ia32_vpcomw: 5154 case X86::BI__builtin_ia32_vpcomd: 5155 case X86::BI__builtin_ia32_vpcomq: 5156 case X86::BI__builtin_ia32_vec_set_v8hi: 5157 case X86::BI__builtin_ia32_vec_set_v8si: 5158 i = 2; l = 0; u = 7; 5159 break; 5160 case X86::BI__builtin_ia32_vpermilpd256: 5161 case X86::BI__builtin_ia32_roundps: 5162 case X86::BI__builtin_ia32_roundpd: 5163 case X86::BI__builtin_ia32_roundps256: 5164 case X86::BI__builtin_ia32_roundpd256: 5165 case X86::BI__builtin_ia32_getmantpd128_mask: 5166 case X86::BI__builtin_ia32_getmantpd256_mask: 5167 case X86::BI__builtin_ia32_getmantps128_mask: 5168 case X86::BI__builtin_ia32_getmantps256_mask: 5169 case X86::BI__builtin_ia32_getmantpd512_mask: 5170 case X86::BI__builtin_ia32_getmantps512_mask: 5171 case X86::BI__builtin_ia32_getmantph128_mask: 5172 case X86::BI__builtin_ia32_getmantph256_mask: 5173 case X86::BI__builtin_ia32_getmantph512_mask: 5174 case X86::BI__builtin_ia32_vec_ext_v16qi: 5175 case X86::BI__builtin_ia32_vec_ext_v16hi: 5176 i = 1; l = 0; u = 15; 5177 break; 5178 case X86::BI__builtin_ia32_pblendd128: 5179 case X86::BI__builtin_ia32_blendps: 5180 case X86::BI__builtin_ia32_blendpd256: 5181 case X86::BI__builtin_ia32_shufpd256: 5182 case X86::BI__builtin_ia32_roundss: 5183 case X86::BI__builtin_ia32_roundsd: 5184 case X86::BI__builtin_ia32_rangepd128_mask: 5185 case X86::BI__builtin_ia32_rangepd256_mask: 5186 case X86::BI__builtin_ia32_rangepd512_mask: 5187 case X86::BI__builtin_ia32_rangeps128_mask: 5188 case X86::BI__builtin_ia32_rangeps256_mask: 5189 case X86::BI__builtin_ia32_rangeps512_mask: 5190 case 
X86::BI__builtin_ia32_getmantsd_round_mask: 5191 case X86::BI__builtin_ia32_getmantss_round_mask: 5192 case X86::BI__builtin_ia32_getmantsh_round_mask: 5193 case X86::BI__builtin_ia32_vec_set_v16qi: 5194 case X86::BI__builtin_ia32_vec_set_v16hi: 5195 i = 2; l = 0; u = 15; 5196 break; 5197 case X86::BI__builtin_ia32_vec_ext_v32qi: 5198 i = 1; l = 0; u = 31; 5199 break; 5200 case X86::BI__builtin_ia32_cmpps: 5201 case X86::BI__builtin_ia32_cmpss: 5202 case X86::BI__builtin_ia32_cmppd: 5203 case X86::BI__builtin_ia32_cmpsd: 5204 case X86::BI__builtin_ia32_cmpps256: 5205 case X86::BI__builtin_ia32_cmppd256: 5206 case X86::BI__builtin_ia32_cmpps128_mask: 5207 case X86::BI__builtin_ia32_cmppd128_mask: 5208 case X86::BI__builtin_ia32_cmpps256_mask: 5209 case X86::BI__builtin_ia32_cmppd256_mask: 5210 case X86::BI__builtin_ia32_cmpps512_mask: 5211 case X86::BI__builtin_ia32_cmppd512_mask: 5212 case X86::BI__builtin_ia32_cmpsd_mask: 5213 case X86::BI__builtin_ia32_cmpss_mask: 5214 case X86::BI__builtin_ia32_vec_set_v32qi: 5215 i = 2; l = 0; u = 31; 5216 break; 5217 case X86::BI__builtin_ia32_permdf256: 5218 case X86::BI__builtin_ia32_permdi256: 5219 case X86::BI__builtin_ia32_permdf512: 5220 case X86::BI__builtin_ia32_permdi512: 5221 case X86::BI__builtin_ia32_vpermilps: 5222 case X86::BI__builtin_ia32_vpermilps256: 5223 case X86::BI__builtin_ia32_vpermilpd512: 5224 case X86::BI__builtin_ia32_vpermilps512: 5225 case X86::BI__builtin_ia32_pshufd: 5226 case X86::BI__builtin_ia32_pshufd256: 5227 case X86::BI__builtin_ia32_pshufd512: 5228 case X86::BI__builtin_ia32_pshufhw: 5229 case X86::BI__builtin_ia32_pshufhw256: 5230 case X86::BI__builtin_ia32_pshufhw512: 5231 case X86::BI__builtin_ia32_pshuflw: 5232 case X86::BI__builtin_ia32_pshuflw256: 5233 case X86::BI__builtin_ia32_pshuflw512: 5234 case X86::BI__builtin_ia32_vcvtps2ph: 5235 case X86::BI__builtin_ia32_vcvtps2ph_mask: 5236 case X86::BI__builtin_ia32_vcvtps2ph256: 5237 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 5238 
case X86::BI__builtin_ia32_vcvtps2ph512_mask: 5239 case X86::BI__builtin_ia32_rndscaleps_128_mask: 5240 case X86::BI__builtin_ia32_rndscalepd_128_mask: 5241 case X86::BI__builtin_ia32_rndscaleps_256_mask: 5242 case X86::BI__builtin_ia32_rndscalepd_256_mask: 5243 case X86::BI__builtin_ia32_rndscaleps_mask: 5244 case X86::BI__builtin_ia32_rndscalepd_mask: 5245 case X86::BI__builtin_ia32_rndscaleph_mask: 5246 case X86::BI__builtin_ia32_reducepd128_mask: 5247 case X86::BI__builtin_ia32_reducepd256_mask: 5248 case X86::BI__builtin_ia32_reducepd512_mask: 5249 case X86::BI__builtin_ia32_reduceps128_mask: 5250 case X86::BI__builtin_ia32_reduceps256_mask: 5251 case X86::BI__builtin_ia32_reduceps512_mask: 5252 case X86::BI__builtin_ia32_reduceph128_mask: 5253 case X86::BI__builtin_ia32_reduceph256_mask: 5254 case X86::BI__builtin_ia32_reduceph512_mask: 5255 case X86::BI__builtin_ia32_prold512: 5256 case X86::BI__builtin_ia32_prolq512: 5257 case X86::BI__builtin_ia32_prold128: 5258 case X86::BI__builtin_ia32_prold256: 5259 case X86::BI__builtin_ia32_prolq128: 5260 case X86::BI__builtin_ia32_prolq256: 5261 case X86::BI__builtin_ia32_prord512: 5262 case X86::BI__builtin_ia32_prorq512: 5263 case X86::BI__builtin_ia32_prord128: 5264 case X86::BI__builtin_ia32_prord256: 5265 case X86::BI__builtin_ia32_prorq128: 5266 case X86::BI__builtin_ia32_prorq256: 5267 case X86::BI__builtin_ia32_fpclasspd128_mask: 5268 case X86::BI__builtin_ia32_fpclasspd256_mask: 5269 case X86::BI__builtin_ia32_fpclassps128_mask: 5270 case X86::BI__builtin_ia32_fpclassps256_mask: 5271 case X86::BI__builtin_ia32_fpclassps512_mask: 5272 case X86::BI__builtin_ia32_fpclasspd512_mask: 5273 case X86::BI__builtin_ia32_fpclassph128_mask: 5274 case X86::BI__builtin_ia32_fpclassph256_mask: 5275 case X86::BI__builtin_ia32_fpclassph512_mask: 5276 case X86::BI__builtin_ia32_fpclasssd_mask: 5277 case X86::BI__builtin_ia32_fpclassss_mask: 5278 case X86::BI__builtin_ia32_fpclasssh_mask: 5279 case 
X86::BI__builtin_ia32_pslldqi128_byteshift: 5280 case X86::BI__builtin_ia32_pslldqi256_byteshift: 5281 case X86::BI__builtin_ia32_pslldqi512_byteshift: 5282 case X86::BI__builtin_ia32_psrldqi128_byteshift: 5283 case X86::BI__builtin_ia32_psrldqi256_byteshift: 5284 case X86::BI__builtin_ia32_psrldqi512_byteshift: 5285 case X86::BI__builtin_ia32_kshiftliqi: 5286 case X86::BI__builtin_ia32_kshiftlihi: 5287 case X86::BI__builtin_ia32_kshiftlisi: 5288 case X86::BI__builtin_ia32_kshiftlidi: 5289 case X86::BI__builtin_ia32_kshiftriqi: 5290 case X86::BI__builtin_ia32_kshiftrihi: 5291 case X86::BI__builtin_ia32_kshiftrisi: 5292 case X86::BI__builtin_ia32_kshiftridi: 5293 i = 1; l = 0; u = 255; 5294 break; 5295 case X86::BI__builtin_ia32_vperm2f128_pd256: 5296 case X86::BI__builtin_ia32_vperm2f128_ps256: 5297 case X86::BI__builtin_ia32_vperm2f128_si256: 5298 case X86::BI__builtin_ia32_permti256: 5299 case X86::BI__builtin_ia32_pblendw128: 5300 case X86::BI__builtin_ia32_pblendw256: 5301 case X86::BI__builtin_ia32_blendps256: 5302 case X86::BI__builtin_ia32_pblendd256: 5303 case X86::BI__builtin_ia32_palignr128: 5304 case X86::BI__builtin_ia32_palignr256: 5305 case X86::BI__builtin_ia32_palignr512: 5306 case X86::BI__builtin_ia32_alignq512: 5307 case X86::BI__builtin_ia32_alignd512: 5308 case X86::BI__builtin_ia32_alignd128: 5309 case X86::BI__builtin_ia32_alignd256: 5310 case X86::BI__builtin_ia32_alignq128: 5311 case X86::BI__builtin_ia32_alignq256: 5312 case X86::BI__builtin_ia32_vcomisd: 5313 case X86::BI__builtin_ia32_vcomiss: 5314 case X86::BI__builtin_ia32_shuf_f32x4: 5315 case X86::BI__builtin_ia32_shuf_f64x2: 5316 case X86::BI__builtin_ia32_shuf_i32x4: 5317 case X86::BI__builtin_ia32_shuf_i64x2: 5318 case X86::BI__builtin_ia32_shufpd512: 5319 case X86::BI__builtin_ia32_shufps: 5320 case X86::BI__builtin_ia32_shufps256: 5321 case X86::BI__builtin_ia32_shufps512: 5322 case X86::BI__builtin_ia32_dbpsadbw128: 5323 case X86::BI__builtin_ia32_dbpsadbw256: 5324 case 
X86::BI__builtin_ia32_dbpsadbw512: 5325 case X86::BI__builtin_ia32_vpshldd128: 5326 case X86::BI__builtin_ia32_vpshldd256: 5327 case X86::BI__builtin_ia32_vpshldd512: 5328 case X86::BI__builtin_ia32_vpshldq128: 5329 case X86::BI__builtin_ia32_vpshldq256: 5330 case X86::BI__builtin_ia32_vpshldq512: 5331 case X86::BI__builtin_ia32_vpshldw128: 5332 case X86::BI__builtin_ia32_vpshldw256: 5333 case X86::BI__builtin_ia32_vpshldw512: 5334 case X86::BI__builtin_ia32_vpshrdd128: 5335 case X86::BI__builtin_ia32_vpshrdd256: 5336 case X86::BI__builtin_ia32_vpshrdd512: 5337 case X86::BI__builtin_ia32_vpshrdq128: 5338 case X86::BI__builtin_ia32_vpshrdq256: 5339 case X86::BI__builtin_ia32_vpshrdq512: 5340 case X86::BI__builtin_ia32_vpshrdw128: 5341 case X86::BI__builtin_ia32_vpshrdw256: 5342 case X86::BI__builtin_ia32_vpshrdw512: 5343 i = 2; l = 0; u = 255; 5344 break; 5345 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5346 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5347 case X86::BI__builtin_ia32_fixupimmps512_mask: 5348 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5349 case X86::BI__builtin_ia32_fixupimmsd_mask: 5350 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5351 case X86::BI__builtin_ia32_fixupimmss_mask: 5352 case X86::BI__builtin_ia32_fixupimmss_maskz: 5353 case X86::BI__builtin_ia32_fixupimmpd128_mask: 5354 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 5355 case X86::BI__builtin_ia32_fixupimmpd256_mask: 5356 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 5357 case X86::BI__builtin_ia32_fixupimmps128_mask: 5358 case X86::BI__builtin_ia32_fixupimmps128_maskz: 5359 case X86::BI__builtin_ia32_fixupimmps256_mask: 5360 case X86::BI__builtin_ia32_fixupimmps256_maskz: 5361 case X86::BI__builtin_ia32_pternlogd512_mask: 5362 case X86::BI__builtin_ia32_pternlogd512_maskz: 5363 case X86::BI__builtin_ia32_pternlogq512_mask: 5364 case X86::BI__builtin_ia32_pternlogq512_maskz: 5365 case X86::BI__builtin_ia32_pternlogd128_mask: 5366 case 
X86::BI__builtin_ia32_pternlogd128_maskz: 5367 case X86::BI__builtin_ia32_pternlogd256_mask: 5368 case X86::BI__builtin_ia32_pternlogd256_maskz: 5369 case X86::BI__builtin_ia32_pternlogq128_mask: 5370 case X86::BI__builtin_ia32_pternlogq128_maskz: 5371 case X86::BI__builtin_ia32_pternlogq256_mask: 5372 case X86::BI__builtin_ia32_pternlogq256_maskz: 5373 i = 3; l = 0; u = 255; 5374 break; 5375 case X86::BI__builtin_ia32_gatherpfdpd: 5376 case X86::BI__builtin_ia32_gatherpfdps: 5377 case X86::BI__builtin_ia32_gatherpfqpd: 5378 case X86::BI__builtin_ia32_gatherpfqps: 5379 case X86::BI__builtin_ia32_scatterpfdpd: 5380 case X86::BI__builtin_ia32_scatterpfdps: 5381 case X86::BI__builtin_ia32_scatterpfqpd: 5382 case X86::BI__builtin_ia32_scatterpfqps: 5383 i = 4; l = 2; u = 3; 5384 break; 5385 case X86::BI__builtin_ia32_reducesd_mask: 5386 case X86::BI__builtin_ia32_reducess_mask: 5387 case X86::BI__builtin_ia32_rndscalesd_round_mask: 5388 case X86::BI__builtin_ia32_rndscaless_round_mask: 5389 case X86::BI__builtin_ia32_rndscalesh_round_mask: 5390 case X86::BI__builtin_ia32_reducesh_mask: 5391 i = 4; l = 0; u = 255; 5392 break; 5393 } 5394 5395 // Note that we don't force a hard error on the range check here, allowing 5396 // template-generated or macro-generated dead code to potentially have out-of- 5397 // range values. These need to code generate, but don't need to necessarily 5398 // make any sense. We use a warning that defaults to an error. 5399 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 5400 } 5401 5402 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo 5403 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 5404 /// Returns true when the format fits the function and the FormatStringInfo has 5405 /// been populated. 
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  // A first-arg of 0 in the attribute marks a va_list-style function (no
  // checkable data arguments follow the format string).
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  // Attribute indices are 1-based; convert to 0-based argument indices.
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if(FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Evaluate the union's first member instead of the union itself.
          Expr = ILE->getInit(0);
  }

  // Null means: not dependent, constant-evaluatable, and evaluates to false.
  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

/// Emit a -Wnonnull runtime-behavior warning if \p ArgExpr is known to
/// evaluate to null at the given call site.
static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                            << ArgExpr->getSourceRange());
}

/// If \p Format is an NSString-style format attribute, set \p Idx to its
/// 0-based format-string argument index and return true.
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    // CFString-formatting functions take the format string as argument 2.
    Idx = 2;
    Format = true;
  }
  else
    // Otherwise look for an NSString format attribute on the declaration.
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  // The format string may be either an ObjC @"..." literal or a plain
  // C string literal; anything else is not checkable.
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
      dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

/// Collect all argument positions declared non-null (via nonnull attributes
/// or _Nonnull parameter types) and warn for each argument known to be null.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        // Lazily allocate the bitvector only when some index is in range.
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}

/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
                             StringRef ParamName, QualType ArgTy,
                             QualType ParamTy) {

  // If a function accepts a pointer or reference type
  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
    return;

  // If the parameter is a pointer type, get the pointee type for the
  // argument too. If the parameter is a reference type, don't try to get
  // the pointee type for the argument.
  if (ParamTy->isPointerType())
    ArgTy = ArgTy->getPointeeType();

  // Remove reference or pointer
  ParamTy = ParamTy->getPointeeType();

  // Find expected alignment, and the actual alignment of the passed object.
  // getTypeAlignInChars requires complete types
  if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
      ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
      ArgTy->isUndeducedType())
    return;

  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);

  // If the argument is less aligned than the parameter, there is a
  // potential alignment issue.
  if (ArgAlign < ParamAlign)
    Diag(Loc, diag::warn_param_mismatched_alignment)
        << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
        << ParamName << (FDecl != nullptr) << FDecl;
}

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Number of declared parameters: prefer the prototype, fall back to the
    // declaration; 0 if neither is available.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip variadic args already validated by the format-string checks.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    // NOTE(review): no bounds check here — assumes the alloc_align parameter
    // index is always within Args; confirm callers guarantee enough arguments.
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  // Check the alignment of the implicit 'this' argument against the
  // constructor's expectation.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
5778 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5779 const FunctionProtoType *Proto) { 5780 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5781 isa<CXXMethodDecl>(FDecl); 5782 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5783 IsMemberOperatorCall; 5784 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5785 TheCall->getCallee()); 5786 Expr** Args = TheCall->getArgs(); 5787 unsigned NumArgs = TheCall->getNumArgs(); 5788 5789 Expr *ImplicitThis = nullptr; 5790 if (IsMemberOperatorCall) { 5791 // If this is a call to a member operator, hide the first argument 5792 // from checkCall. 5793 // FIXME: Our choice of AST representation here is less than ideal. 5794 ImplicitThis = Args[0]; 5795 ++Args; 5796 --NumArgs; 5797 } else if (IsMemberFunction) 5798 ImplicitThis = 5799 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5800 5801 if (ImplicitThis) { 5802 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5803 // used. 5804 QualType ThisType = ImplicitThis->getType(); 5805 if (!ThisType->isPointerType()) { 5806 assert(!ThisType->isReferenceType()); 5807 ThisType = Context.getPointerType(ThisType); 5808 } 5809 5810 QualType ThisTypeFromDecl = 5811 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5812 5813 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5814 ThisTypeFromDecl); 5815 } 5816 5817 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5818 IsMemberFunction, TheCall->getRParenLoc(), 5819 TheCall->getCallee()->getSourceRange(), CallType); 5820 5821 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5822 // None of the checks below are needed for functions that don't have 5823 // simple names (e.g., C++ conversion functions). 5824 if (!FnInfo) 5825 return false; 5826 5827 // Enforce TCB except for builtin calls, which are always allowed. 
5828 if (FDecl->getBuiltinID() == 0) 5829 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl); 5830 5831 CheckAbsoluteValueFunction(TheCall, FDecl); 5832 CheckMaxUnsignedZero(TheCall, FDecl); 5833 5834 if (getLangOpts().ObjC) 5835 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5836 5837 unsigned CMId = FDecl->getMemoryFunctionKind(); 5838 5839 // Handle memory setting and copying functions. 5840 switch (CMId) { 5841 case 0: 5842 return false; 5843 case Builtin::BIstrlcpy: // fallthrough 5844 case Builtin::BIstrlcat: 5845 CheckStrlcpycatArguments(TheCall, FnInfo); 5846 break; 5847 case Builtin::BIstrncat: 5848 CheckStrncatArguments(TheCall, FnInfo); 5849 break; 5850 case Builtin::BIfree: 5851 CheckFreeArguments(TheCall); 5852 break; 5853 default: 5854 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5855 } 5856 5857 return false; 5858 } 5859 5860 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5861 ArrayRef<const Expr *> Args) { 5862 VariadicCallType CallType = 5863 Method->isVariadic() ? 
VariadicMethod : VariadicDoesNotApply; 5864 5865 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5866 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5867 CallType); 5868 5869 CheckTCBEnforcement(lbrac, Method); 5870 5871 return false; 5872 } 5873 5874 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5875 const FunctionProtoType *Proto) { 5876 QualType Ty; 5877 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5878 Ty = V->getType().getNonReferenceType(); 5879 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5880 Ty = F->getType().getNonReferenceType(); 5881 else 5882 return false; 5883 5884 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5885 !Ty->isFunctionProtoType()) 5886 return false; 5887 5888 VariadicCallType CallType; 5889 if (!Proto || !Proto->isVariadic()) { 5890 CallType = VariadicDoesNotApply; 5891 } else if (Ty->isBlockPointerType()) { 5892 CallType = VariadicBlock; 5893 } else { // Ty->isFunctionPointerType() 5894 CallType = VariadicFunction; 5895 } 5896 5897 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5898 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5899 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5900 TheCall->getCallee()->getSourceRange(), CallType); 5901 5902 return false; 5903 } 5904 5905 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5906 /// such as function pointers returned from functions. 
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  // No declaration is available, so only prototype-based checks apply.
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Returns true if \p Ordering is a representable C ABI memory ordering that
/// is permitted for the atomic operation \p Op (e.g. a load may not use a
/// release or acq_rel ordering; a store may not use consume/acquire/acq_rel).
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    // Loads may not have release semantics.
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    // Stores may not have acquire/consume semantics.
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}

// Unpack the call expression for an overloaded atomic builtin and forward
// to BuildAtomicExpr, which does the real type-checking.
ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
                         DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
                         Op);
}

ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                                 SourceLocation RParenLoc, MultiExprArg Args,
                                 AtomicExpr::AtomicOp Op,
                                 AtomicArgumentOrder ArgOrder) {
  // All the non-OpenCL operations take one of the following forms.
  // The OpenCL operations take the __c11 forms with one extra argument for
  // synchronization scope.
  enum {
    // C    __c11_atomic_init(A *, C)
    Init,

    // C    __c11_atomic_load(A *, int)
    Load,

    // void __atomic_load(A *, CP, int)
    LoadCopy,

    // void __atomic_store(A *, CP, int)
    Copy,

    // C    __c11_atomic_add(A *, M, int)
    Arithmetic,

    // C    __atomic_exchange_n(A *, CP, int)
    Xchg,

    // void __atomic_exchange(A *, C *, CP, int)
    GNUXchg,

    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
    C11CmpXchg,

    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
    GNUCmpXchg
  } Form = Init;

  // Per-form argument counts, indexed by the Form enumerators above.
  const unsigned NumForm = GNUCmpXchg + 1;
  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
  // where:
  //   C is an appropriate type,
  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
  //   the int parameters are for orderings.

  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
      && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
    "need to update code for modified forms");
  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
                    AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
                        AtomicExpr::AO__atomic_load,
                "need to update code for modified C11 atomics");
  // Classify the builtin family; the classifications below rely on the
  // AtomicOp enumerators being laid out in contiguous per-family ranges
  // (guarded by the static_assert above).
  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
  bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
               Op <= AtomicExpr::AO__hip_atomic_fetch_max;
  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
               Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
               IsOpenCL;
  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
             Op == AtomicExpr::AO__atomic_store_n ||
             Op == AtomicExpr::AO__atomic_exchange_n ||
             Op == AtomicExpr::AO__atomic_compare_exchange_n;
  bool IsAddSub = false;

  // Map the specific operation onto its argument form.
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    Form = Init;
    break;

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    Form = Load;
    break;

  case AtomicExpr::AO__atomic_load:
    Form = LoadCopy;
    break;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    Form = Copy;
    break;
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    // Add/sub additionally permit pointer and (most) floating-point operands.
    IsAddSub = true;
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    Form = Arithmetic;
    break;

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
    Form = Xchg;
    break;

  case AtomicExpr::AO__atomic_exchange:
    Form = GNUXchg;
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    Form = C11CmpXchg;
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    Form = GNUCmpXchg;
    break;
  }

  // OpenCL and HIP builtins take a trailing synchronization-scope argument
  // (except init, which has no ordering or scope).
  unsigned AdjustedNumArgs = NumArgs[Form];
  if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init)
    ++AdjustedNumArgs;
  // Check we have the right number of arguments.
  if (Args.size() < AdjustedNumArgs) {
    Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  } else if (Args.size() > AdjustedNumArgs) {
    Diag(Args[AdjustedNumArgs]->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  }

  // Inspect the first argument of the atomic operation.
  Expr *Ptr = Args[0];
  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
  if (ConvertedPtr.isInvalid())
    return ExprError();

  Ptr = ConvertedPtr.get();
  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // For a __c11 builtin, this should be a pointer to an _Atomic type.
  QualType AtomTy = pointerType->getPointeeType(); // 'A'
  QualType ValType = AtomTy; // 'C'
  if (IsC11) {
    if (!AtomTy->isAtomicType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Writes through a const or constant-address-space atomic are rejected;
    // loads are allowed on const atomics.
    if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
        AtomTy.getAddressSpace() == LangAS::opencl_constant) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
          << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
          << Ptr->getSourceRange();
      return ExprError();
    }
    ValType = AtomTy->castAs<AtomicType>()->getValueType();
  } else if (Form != Load && Form != LoadCopy) {
    if (ValType.isConstQualified()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
  }

  // For an arithmetic operation, the implied arithmetic must be well-formed.
  if (Form == Arithmetic) {
    // GCC does not enforce these rules for GNU atomics, but we do to help catch
    // trivial type errors.
    auto IsAllowedValueType = [&](QualType ValType) {
      if (ValType->isIntegerType())
        return true;
      if (ValType->isPointerType())
        return true;
      if (!ValType->isFloatingType())
        return false;
      // LLVM Parser does not allow atomicrmw with x86_fp80 type.
      if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
          &Context.getTargetInfo().getLongDoubleFormat() ==
              &llvm::APFloat::x87DoubleExtended())
        return false;
      return true;
    };
    if (IsAddSub && !IsAllowedValueType(ValType)) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    if (!IsAddSub && !ValType->isIntegerType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    if (IsC11 && ValType->isPointerType() &&
        RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
                            diag::err_incomplete_type)) {
      return ExprError();
    }
  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
    // For __atomic_*_n operations, the value type must be a scalar integral or
    // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
        << IsC11 << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
      !AtomTy->isScalarType()) {
    // For GNU atomics, require a trivially-copyable type. This is not part of
    // the GNU atomics specification but we enforce it for consistency with
    // other atomics which generally all require a trivially-copyable type. This
    // is because atomics just copy bits.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    // FIXME: Can this happen? By this point, ValType should be known
    // to be trivially copyable.
    Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
        << ValType << Ptr->getSourceRange();
    return ExprError();
  }

  // All atomic operations have an overload which takes a pointer to a volatile
  // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
  // into the result or the other operands. Similarly atomic_load takes a
  // pointer to a const 'A'.
  ValType.removeLocalVolatile();
  ValType.removeLocalConst();
  QualType ResultType = ValType;
  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
      Form == Init)
    ResultType = Context.VoidTy;
  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
    ResultType = Context.BoolTy;

  // The type of a parameter passed 'by value'. In the GNU atomics, such
  // arguments are actually passed as pointers.
  QualType ByValType = ValType; // 'CP'
  bool IsPassedByAddress = false;
  if (!IsC11 && !IsHIP && !IsN) {
    ByValType = Ptr->getType();
    IsPassedByAddress = true;
  }

  // If the arguments arrived in AST (source) order, permute them into the
  // canonical API order documented on the Form enum above.
  SmallVector<Expr *, 5> APIOrderedArgs;
  if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
    APIOrderedArgs.push_back(Args[0]);
    switch (Form) {
    case Init:
    case Load:
      APIOrderedArgs.push_back(Args[1]); // Val1/Order
      break;
    case LoadCopy:
    case Copy:
    case Arithmetic:
    case Xchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case GNUXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[3]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case C11CmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    case GNUCmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[5]); // Weak
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    }
  } else
    APIOrderedArgs.append(Args.begin(), Args.end());

  // The first argument's non-CV pointer type is used to deduce the type of
  // subsequent arguments, except for:
  //  - weak flag (always converted to bool)
  //  - memory order (always converted to int)
  //  - scope (always converted to int)
  for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
    QualType Ty;
    if (i < NumVals[Form] + 1) {
      switch (i) {
      case 0:
        // The first argument is always a pointer. It has a fixed type.
        // It is always dereferenced, a nullptr is undefined.
        CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        // Nothing else to do: we already know all we want about this pointer.
        continue;
      case 1:
        // The second argument is the non-atomic operand. For arithmetic, this
        // is always passed by value, and for a compare_exchange it is always
        // passed by address. For the rest, GNU uses by-address and C11 uses
        // by-value.
        assert(Form != Load);
        if (Form == Arithmetic && ValType->isPointerType())
          Ty = Context.getPointerDiffType();
        else if (Form == Init || Form == Arithmetic)
          Ty = ValType;
        else if (Form == Copy || Form == Xchg) {
          if (IsPassedByAddress) {
            // The value pointer is always dereferenced, a nullptr is undefined.
            CheckNonNullArgument(*this, APIOrderedArgs[i],
                                 ExprRange.getBegin());
          }
          Ty = ByValType;
        } else {
          Expr *ValArg = APIOrderedArgs[i];
          // The value pointer is always dereferenced, a nullptr is undefined.
          CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
          LangAS AS = LangAS::Default;
          // Keep address space of non-atomic pointer type.
          if (const PointerType *PtrTy =
                  ValArg->getType()->getAs<PointerType>()) {
            AS = PtrTy->getPointeeType().getAddressSpace();
          }
          Ty = Context.getPointerType(
              Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
        }
        break;
      case 2:
        // The third argument to compare_exchange / GNU exchange is the desired
        // value, either by-value (for the C11 and *_n variant) or as a pointer.
        if (IsPassedByAddress)
          CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        Ty = ByValType;
        break;
      case 3:
        // The fourth argument to GNU compare_exchange is a 'weak' flag.
        Ty = Context.BoolTy;
        break;
      }
    } else {
      // The order(s) and scope are always converted to int.
      Ty = Context.IntTy;
    }

    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Ty, false);
    ExprResult Arg = APIOrderedArgs[i];
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    APIOrderedArgs[i] = Arg.get();
  }

  // Permute the arguments into a 'consistent' order expected by the
  // AtomicExpr accessors (Ptr, Order, Val1, OrderFail, Val2, Weak).
  SmallVector<Expr*, 5> SubExprs;
  SubExprs.push_back(Ptr);
  switch (Form) {
  case Init:
    // Note, AtomicExpr::getVal1() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case Load:
    SubExprs.push_back(APIOrderedArgs[1]); // Order
    break;
  case LoadCopy:
  case Copy:
  case Arithmetic:
  case Xchg:
    SubExprs.push_back(APIOrderedArgs[2]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case GNUXchg:
    // Note, AtomicExpr::getVal2() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case C11CmpXchg:
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case GNUCmpXchg:
    SubExprs.push_back(APIOrderedArgs[4]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    SubExprs.push_back(APIOrderedArgs[3]); // Weak
    break;
  }

  // If the memory ordering is a constant, diagnose orderings that are
  // invalid for this operation (warning only; the call is still built).
  if (SubExprs.size() >= 2 && Form != Init) {
    if (Optional<llvm::APSInt> Result =
            SubExprs[1]->getIntegerConstantExpr(Context))
      if (!isValidOrderingForOp(Result->getSExtValue(), Op))
        Diag(SubExprs[1]->getBeginLoc(),
             diag::warn_atomic_op_has_invalid_memory_order)
            << SubExprs[1]->getSourceRange();
  }

  // For operations with a synchronization scope (OpenCL/HIP), validate a
  // constant scope argument and append it to the sub-expressions.
  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
    auto *Scope = Args[Args.size() - 1];
    if (Optional<llvm::APSInt> Result =
            Scope->getIntegerConstantExpr(Context)) {
      if (!ScopeModel->isValid(Result->getZExtValue()))
        Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
            << Scope->getSourceRange();
    }
    SubExprs.push_back(Scope);
  }

  AtomicExpr *AE = new (Context)
      AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);

  if ((Op == AtomicExpr::AO__c11_atomic_load ||
       Op == AtomicExpr::AO__c11_atomic_store ||
       Op == AtomicExpr::AO__opencl_atomic_load ||
       Op == AtomicExpr::AO__hip_atomic_load ||
       Op == AtomicExpr::AO__opencl_atomic_store ||
       Op == AtomicExpr::AO__hip_atomic_store) &&
      Context.AtomicUsesUnsupportedLibcall(AE))
    Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load ||
             Op == AtomicExpr::AO__hip_atomic_load)
                ? 0
                : 1);

  // _BitInt values are not currently supported by any atomic builtin.
  if (ValType->isBitIntType()) {
    Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
    return ExprError();
  }

  return AE;
}

/// checkBuiltinArgument - Given a call to a builtin function, perform
/// normal type-checking on the given argument, updating the call in
/// place. This is useful when a builtin function requires custom
/// type-checking for some of its arguments but not necessarily all of
/// them.
///
/// Returns true on error.
6467 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6468 FunctionDecl *Fn = E->getDirectCallee(); 6469 assert(Fn && "builtin call without direct callee!"); 6470 6471 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6472 InitializedEntity Entity = 6473 InitializedEntity::InitializeParameter(S.Context, Param); 6474 6475 ExprResult Arg = E->getArg(ArgIndex); 6476 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6477 if (Arg.isInvalid()) 6478 return true; 6479 6480 E->setArg(ArgIndex, Arg.get()); 6481 return false; 6482 } 6483 6484 /// We have a call to a function like __sync_fetch_and_add, which is an 6485 /// overloaded function based on the pointer type of its first argument. 6486 /// The main BuildCallExpr routines have already promoted the types of 6487 /// arguments because all of these calls are prototyped as void(...). 6488 /// 6489 /// This function goes through and does final semantic checking for these 6490 /// builtins, as well as generating any warnings. 6491 ExprResult 6492 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6493 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6494 Expr *Callee = TheCall->getCallee(); 6495 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6496 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6497 6498 // Ensure that we have at least one argument to do type inference from. 6499 if (TheCall->getNumArgs() < 1) { 6500 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6501 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6502 return ExprError(); 6503 } 6504 6505 // Inspect the first argument of the atomic builtin. This should always be 6506 // a pointer type, whose element is an integral scalar or pointer type. 6507 // Because it is a pointer type, we don't have to worry about any implicit 6508 // casts here. 6509 // FIXME: We don't allow floating point scalars as input. 
6510 Expr *FirstArg = TheCall->getArg(0); 6511 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6512 if (FirstArgResult.isInvalid()) 6513 return ExprError(); 6514 FirstArg = FirstArgResult.get(); 6515 TheCall->setArg(0, FirstArg); 6516 6517 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6518 if (!pointerType) { 6519 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6520 << FirstArg->getType() << FirstArg->getSourceRange(); 6521 return ExprError(); 6522 } 6523 6524 QualType ValType = pointerType->getPointeeType(); 6525 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6526 !ValType->isBlockPointerType()) { 6527 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6528 << FirstArg->getType() << FirstArg->getSourceRange(); 6529 return ExprError(); 6530 } 6531 6532 if (ValType.isConstQualified()) { 6533 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6534 << FirstArg->getType() << FirstArg->getSourceRange(); 6535 return ExprError(); 6536 } 6537 6538 switch (ValType.getObjCLifetime()) { 6539 case Qualifiers::OCL_None: 6540 case Qualifiers::OCL_ExplicitNone: 6541 // okay 6542 break; 6543 6544 case Qualifiers::OCL_Weak: 6545 case Qualifiers::OCL_Strong: 6546 case Qualifiers::OCL_Autoreleasing: 6547 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6548 << ValType << FirstArg->getSourceRange(); 6549 return ExprError(); 6550 } 6551 6552 // Strip any qualifiers off ValType. 6553 ValType = ValType.getUnqualifiedType(); 6554 6555 // The majority of builtins return a value, but a few have special return 6556 // types, so allow them to override appropriately below. 6557 QualType ResultType = ValType; 6558 6559 // We need to figure out which concrete builtin this maps onto. For example, 6560 // __sync_fetch_and_add with a 2 byte object turns into 6561 // __sync_fetch_and_add_2. 
6562 #define BUILTIN_ROW(x) \ 6563 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6564 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6565 6566 static const unsigned BuiltinIndices[][5] = { 6567 BUILTIN_ROW(__sync_fetch_and_add), 6568 BUILTIN_ROW(__sync_fetch_and_sub), 6569 BUILTIN_ROW(__sync_fetch_and_or), 6570 BUILTIN_ROW(__sync_fetch_and_and), 6571 BUILTIN_ROW(__sync_fetch_and_xor), 6572 BUILTIN_ROW(__sync_fetch_and_nand), 6573 6574 BUILTIN_ROW(__sync_add_and_fetch), 6575 BUILTIN_ROW(__sync_sub_and_fetch), 6576 BUILTIN_ROW(__sync_and_and_fetch), 6577 BUILTIN_ROW(__sync_or_and_fetch), 6578 BUILTIN_ROW(__sync_xor_and_fetch), 6579 BUILTIN_ROW(__sync_nand_and_fetch), 6580 6581 BUILTIN_ROW(__sync_val_compare_and_swap), 6582 BUILTIN_ROW(__sync_bool_compare_and_swap), 6583 BUILTIN_ROW(__sync_lock_test_and_set), 6584 BUILTIN_ROW(__sync_lock_release), 6585 BUILTIN_ROW(__sync_swap) 6586 }; 6587 #undef BUILTIN_ROW 6588 6589 // Determine the index of the size. 6590 unsigned SizeIndex; 6591 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6592 case 1: SizeIndex = 0; break; 6593 case 2: SizeIndex = 1; break; 6594 case 4: SizeIndex = 2; break; 6595 case 8: SizeIndex = 3; break; 6596 case 16: SizeIndex = 4; break; 6597 default: 6598 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6599 << FirstArg->getType() << FirstArg->getSourceRange(); 6600 return ExprError(); 6601 } 6602 6603 // Each of these builtins has one pointer argument, followed by some number of 6604 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6605 // that we ignore. Find out which row of BuiltinIndices to read from as well 6606 // as the number of fixed args. 
6607 unsigned BuiltinID = FDecl->getBuiltinID(); 6608 unsigned BuiltinIndex, NumFixed = 1; 6609 bool WarnAboutSemanticsChange = false; 6610 switch (BuiltinID) { 6611 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6612 case Builtin::BI__sync_fetch_and_add: 6613 case Builtin::BI__sync_fetch_and_add_1: 6614 case Builtin::BI__sync_fetch_and_add_2: 6615 case Builtin::BI__sync_fetch_and_add_4: 6616 case Builtin::BI__sync_fetch_and_add_8: 6617 case Builtin::BI__sync_fetch_and_add_16: 6618 BuiltinIndex = 0; 6619 break; 6620 6621 case Builtin::BI__sync_fetch_and_sub: 6622 case Builtin::BI__sync_fetch_and_sub_1: 6623 case Builtin::BI__sync_fetch_and_sub_2: 6624 case Builtin::BI__sync_fetch_and_sub_4: 6625 case Builtin::BI__sync_fetch_and_sub_8: 6626 case Builtin::BI__sync_fetch_and_sub_16: 6627 BuiltinIndex = 1; 6628 break; 6629 6630 case Builtin::BI__sync_fetch_and_or: 6631 case Builtin::BI__sync_fetch_and_or_1: 6632 case Builtin::BI__sync_fetch_and_or_2: 6633 case Builtin::BI__sync_fetch_and_or_4: 6634 case Builtin::BI__sync_fetch_and_or_8: 6635 case Builtin::BI__sync_fetch_and_or_16: 6636 BuiltinIndex = 2; 6637 break; 6638 6639 case Builtin::BI__sync_fetch_and_and: 6640 case Builtin::BI__sync_fetch_and_and_1: 6641 case Builtin::BI__sync_fetch_and_and_2: 6642 case Builtin::BI__sync_fetch_and_and_4: 6643 case Builtin::BI__sync_fetch_and_and_8: 6644 case Builtin::BI__sync_fetch_and_and_16: 6645 BuiltinIndex = 3; 6646 break; 6647 6648 case Builtin::BI__sync_fetch_and_xor: 6649 case Builtin::BI__sync_fetch_and_xor_1: 6650 case Builtin::BI__sync_fetch_and_xor_2: 6651 case Builtin::BI__sync_fetch_and_xor_4: 6652 case Builtin::BI__sync_fetch_and_xor_8: 6653 case Builtin::BI__sync_fetch_and_xor_16: 6654 BuiltinIndex = 4; 6655 break; 6656 6657 case Builtin::BI__sync_fetch_and_nand: 6658 case Builtin::BI__sync_fetch_and_nand_1: 6659 case Builtin::BI__sync_fetch_and_nand_2: 6660 case Builtin::BI__sync_fetch_and_nand_4: 6661 case Builtin::BI__sync_fetch_and_nand_8: 
6662 case Builtin::BI__sync_fetch_and_nand_16: 6663 BuiltinIndex = 5; 6664 WarnAboutSemanticsChange = true; 6665 break; 6666 6667 case Builtin::BI__sync_add_and_fetch: 6668 case Builtin::BI__sync_add_and_fetch_1: 6669 case Builtin::BI__sync_add_and_fetch_2: 6670 case Builtin::BI__sync_add_and_fetch_4: 6671 case Builtin::BI__sync_add_and_fetch_8: 6672 case Builtin::BI__sync_add_and_fetch_16: 6673 BuiltinIndex = 6; 6674 break; 6675 6676 case Builtin::BI__sync_sub_and_fetch: 6677 case Builtin::BI__sync_sub_and_fetch_1: 6678 case Builtin::BI__sync_sub_and_fetch_2: 6679 case Builtin::BI__sync_sub_and_fetch_4: 6680 case Builtin::BI__sync_sub_and_fetch_8: 6681 case Builtin::BI__sync_sub_and_fetch_16: 6682 BuiltinIndex = 7; 6683 break; 6684 6685 case Builtin::BI__sync_and_and_fetch: 6686 case Builtin::BI__sync_and_and_fetch_1: 6687 case Builtin::BI__sync_and_and_fetch_2: 6688 case Builtin::BI__sync_and_and_fetch_4: 6689 case Builtin::BI__sync_and_and_fetch_8: 6690 case Builtin::BI__sync_and_and_fetch_16: 6691 BuiltinIndex = 8; 6692 break; 6693 6694 case Builtin::BI__sync_or_and_fetch: 6695 case Builtin::BI__sync_or_and_fetch_1: 6696 case Builtin::BI__sync_or_and_fetch_2: 6697 case Builtin::BI__sync_or_and_fetch_4: 6698 case Builtin::BI__sync_or_and_fetch_8: 6699 case Builtin::BI__sync_or_and_fetch_16: 6700 BuiltinIndex = 9; 6701 break; 6702 6703 case Builtin::BI__sync_xor_and_fetch: 6704 case Builtin::BI__sync_xor_and_fetch_1: 6705 case Builtin::BI__sync_xor_and_fetch_2: 6706 case Builtin::BI__sync_xor_and_fetch_4: 6707 case Builtin::BI__sync_xor_and_fetch_8: 6708 case Builtin::BI__sync_xor_and_fetch_16: 6709 BuiltinIndex = 10; 6710 break; 6711 6712 case Builtin::BI__sync_nand_and_fetch: 6713 case Builtin::BI__sync_nand_and_fetch_1: 6714 case Builtin::BI__sync_nand_and_fetch_2: 6715 case Builtin::BI__sync_nand_and_fetch_4: 6716 case Builtin::BI__sync_nand_and_fetch_8: 6717 case Builtin::BI__sync_nand_and_fetch_16: 6718 BuiltinIndex = 11; 6719 WarnAboutSemanticsChange = 
true; 6720 break; 6721 6722 case Builtin::BI__sync_val_compare_and_swap: 6723 case Builtin::BI__sync_val_compare_and_swap_1: 6724 case Builtin::BI__sync_val_compare_and_swap_2: 6725 case Builtin::BI__sync_val_compare_and_swap_4: 6726 case Builtin::BI__sync_val_compare_and_swap_8: 6727 case Builtin::BI__sync_val_compare_and_swap_16: 6728 BuiltinIndex = 12; 6729 NumFixed = 2; 6730 break; 6731 6732 case Builtin::BI__sync_bool_compare_and_swap: 6733 case Builtin::BI__sync_bool_compare_and_swap_1: 6734 case Builtin::BI__sync_bool_compare_and_swap_2: 6735 case Builtin::BI__sync_bool_compare_and_swap_4: 6736 case Builtin::BI__sync_bool_compare_and_swap_8: 6737 case Builtin::BI__sync_bool_compare_and_swap_16: 6738 BuiltinIndex = 13; 6739 NumFixed = 2; 6740 ResultType = Context.BoolTy; 6741 break; 6742 6743 case Builtin::BI__sync_lock_test_and_set: 6744 case Builtin::BI__sync_lock_test_and_set_1: 6745 case Builtin::BI__sync_lock_test_and_set_2: 6746 case Builtin::BI__sync_lock_test_and_set_4: 6747 case Builtin::BI__sync_lock_test_and_set_8: 6748 case Builtin::BI__sync_lock_test_and_set_16: 6749 BuiltinIndex = 14; 6750 break; 6751 6752 case Builtin::BI__sync_lock_release: 6753 case Builtin::BI__sync_lock_release_1: 6754 case Builtin::BI__sync_lock_release_2: 6755 case Builtin::BI__sync_lock_release_4: 6756 case Builtin::BI__sync_lock_release_8: 6757 case Builtin::BI__sync_lock_release_16: 6758 BuiltinIndex = 15; 6759 NumFixed = 0; 6760 ResultType = Context.VoidTy; 6761 break; 6762 6763 case Builtin::BI__sync_swap: 6764 case Builtin::BI__sync_swap_1: 6765 case Builtin::BI__sync_swap_2: 6766 case Builtin::BI__sync_swap_4: 6767 case Builtin::BI__sync_swap_8: 6768 case Builtin::BI__sync_swap_16: 6769 BuiltinIndex = 16; 6770 break; 6771 } 6772 6773 // Now that we know how many fixed arguments we expect, first check that we 6774 // have at least that many. 
6775 if (TheCall->getNumArgs() < 1+NumFixed) { 6776 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6777 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6778 << Callee->getSourceRange(); 6779 return ExprError(); 6780 } 6781 6782 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6783 << Callee->getSourceRange(); 6784 6785 if (WarnAboutSemanticsChange) { 6786 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6787 << Callee->getSourceRange(); 6788 } 6789 6790 // Get the decl for the concrete builtin from this, we can tell what the 6791 // concrete integer type we should convert to is. 6792 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6793 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6794 FunctionDecl *NewBuiltinDecl; 6795 if (NewBuiltinID == BuiltinID) 6796 NewBuiltinDecl = FDecl; 6797 else { 6798 // Perform builtin lookup to avoid redeclaring it. 6799 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6800 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6801 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6802 assert(Res.getFoundDecl()); 6803 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6804 if (!NewBuiltinDecl) 6805 return ExprError(); 6806 } 6807 6808 // The first argument --- the pointer --- has a fixed type; we 6809 // deduce the types of the rest of the arguments accordingly. Walk 6810 // the remaining arguments, converting them to the deduced value type. 6811 for (unsigned i = 0; i != NumFixed; ++i) { 6812 ExprResult Arg = TheCall->getArg(i+1); 6813 6814 // GCC does an implicit conversion to the pointer or integer ValType. This 6815 // can fail in some cases (1i -> int**), check for this error case now. 6816 // Initialize the argument. 
6817 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6818 ValType, /*consume*/ false); 6819 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6820 if (Arg.isInvalid()) 6821 return ExprError(); 6822 6823 // Okay, we have something that *can* be converted to the right type. Check 6824 // to see if there is a potentially weird extension going on here. This can 6825 // happen when you do an atomic operation on something like an char* and 6826 // pass in 42. The 42 gets converted to char. This is even more strange 6827 // for things like 45.123 -> char, etc. 6828 // FIXME: Do this check. 6829 TheCall->setArg(i+1, Arg.get()); 6830 } 6831 6832 // Create a new DeclRefExpr to refer to the new decl. 6833 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6834 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6835 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6836 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6837 6838 // Set the callee in the CallExpr. 6839 // FIXME: This loses syntactic information. 6840 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6841 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6842 CK_BuiltinFnToFnPtr); 6843 TheCall->setCallee(PromotedCall.get()); 6844 6845 // Change the result type of the call to match the original value type. This 6846 // is arbitrary, but the codegen for these builtins ins design to handle it 6847 // gracefully. 6848 TheCall->setType(ResultType); 6849 6850 // Prohibit problematic uses of bit-precise integer types with atomic 6851 // builtins. The arguments would have already been converted to the first 6852 // argument's type, so only need to check the first argument. 
  // Reject bit-precise integer widths that are not a power of two; the
  // underlying atomic intrinsics only exist for 1/2/4/8/16-byte operands.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  // Stores take (value, pointer); loads take just (pointer).
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we imply the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();
  // Only integer, pointer, block-pointer, floating-point and vector pointee
  // types are supported by the nontemporal builtins.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  if (!isStore) {
    // A nontemporal load produces a value of the pointee type.
    TheCall->setType(ValType);
    return TheCallResult;
  }

  // For a store, convert the stored value to the pointee type and make the
  // call itself produce void.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return ExprError();

  TheCall->setArg(0, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return TheCallResult;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
6938 bool Sema::CheckObjCString(Expr *Arg) { 6939 Arg = Arg->IgnoreParenCasts(); 6940 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6941 6942 if (!Literal || !Literal->isAscii()) { 6943 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6944 << Arg->getSourceRange(); 6945 return true; 6946 } 6947 6948 if (Literal->containsNonAsciiOrNull()) { 6949 StringRef String = Literal->getString(); 6950 unsigned NumBytes = String.size(); 6951 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6952 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6953 llvm::UTF16 *ToPtr = &ToBuf[0]; 6954 6955 llvm::ConversionResult Result = 6956 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6957 ToPtr + NumBytes, llvm::strictConversion); 6958 // Check for conversion failure. 6959 if (Result != llvm::conversionOK) 6960 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6961 << Arg->getSourceRange(); 6962 } 6963 return false; 6964 } 6965 6966 /// CheckObjCString - Checks that the format string argument to the os_log() 6967 /// and os_trace() functions is correct, and converts it to const char *. 
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // The format may also be given as an ObjC string literal; unwrap it to
    // the underlying StringLiteral.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  // Only ordinary or UTF-8 string literals are acceptable format strings.
  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal to 'const char *' via copy-initialization.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // The builtin's legality depends on the calling convention of the
    // enclosing function, not just the target.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // __builtin_ms_va_start is only meaningful on x86-64/AArch64.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}

/// Check that the current context (function, block, or ObjC method) is
/// variadic; optionally report its last named parameter via \p LastParam.
/// Returns true (after diagnosing) on failure.
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}

/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity.  Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Type->isPromotableIntegerType())
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    // Reason 0 = promotable type, 1 = reference, 2 = 'register' in C.
    unsigned Reason = 0;
    if (Type->isReferenceType())  Reason = 1;
    else if (IsCRegister)         Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  TheCall->setType(Context.VoidTy);
  return false;
}

bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  // In C++, the second argument must (canonically) point at plain char; in C
  // any pointer is accepted since aliasing through `char *` is required for
  // AArch64 at least.
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
    return true;
  };

  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs();

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0                                     /* qualifier difference */
        << 3                                     /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0                             /* qualifier difference */
        << 3                             /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends.  This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends.  This is declared to take (...), so we have
/// to check everything.  We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int.  Try all of those.
  for (unsigned i = 0; i < NumArgs - 1; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics.  Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(NumArgs - 1, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  // The real and imaginary parts must have exactly the same type.
  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
           << "half";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
7339 // Example builtins are : 7340 // vector double vec_xxpermdi(vector double, vector double, int); 7341 // vector short vec_xxsldwi(vector short, vector short, int); 7342 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7343 unsigned ExpectedNumArgs = 3; 7344 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7345 return true; 7346 7347 // Check the third argument is a compile time constant 7348 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7349 return Diag(TheCall->getBeginLoc(), 7350 diag::err_vsx_builtin_nonconstant_argument) 7351 << 3 /* argument index */ << TheCall->getDirectCallee() 7352 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7353 TheCall->getArg(2)->getEndLoc()); 7354 7355 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7356 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7357 7358 // Check the type of argument 1 and argument 2 are vectors. 7359 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7360 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7361 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7362 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7363 << TheCall->getDirectCallee() 7364 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7365 TheCall->getArg(1)->getEndLoc()); 7366 } 7367 7368 // Check the first two arguments are the same type. 7369 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7370 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7371 << TheCall->getDirectCallee() 7372 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7373 TheCall->getArg(1)->getEndLoc()); 7374 } 7375 7376 // When default clang type checking is turned off and the customized type 7377 // checking is used, the returning type of the function must be explicitly 7378 // set. Otherwise it is _Bool by default. 7379 TheCall->setType(Arg1Ty); 7380 7381 return false; 7382 } 7383 7384 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    // Both of the first two operands must be vectors.
    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask.  If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      // Binary form: lhs and rhs must be the same vector type.
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result has as many elements as there are indices, which may
      // differ from the operand element count.
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  // Validate each index: it must be a constant in [0, 2*numElements), or -1.
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    Optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  // Transfer ownership of the argument expressions into the new
  // ShuffleVectorExpr node, clearing them out of the original call.
  SmallVector<Expr*, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}

/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_PRValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  // Both the source operand and the destination type must be vectors
  // (unless dependent).
  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector_type));

  // The conversion is element-wise, so the element counts must match.
  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}

/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
7505 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7506 unsigned NumArgs = TheCall->getNumArgs(); 7507 7508 if (NumArgs > 3) 7509 return Diag(TheCall->getEndLoc(), 7510 diag::err_typecheck_call_too_many_args_at_most) 7511 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7512 7513 // Argument 0 is checked for us and the remaining arguments must be 7514 // constant integers. 7515 for (unsigned i = 1; i != NumArgs; ++i) 7516 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7517 return true; 7518 7519 return false; 7520 } 7521 7522 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7523 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7524 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7525 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7526 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7527 if (checkArgCount(*this, TheCall, 1)) 7528 return true; 7529 Expr *Arg = TheCall->getArg(0); 7530 if (Arg->isInstantiationDependent()) 7531 return false; 7532 7533 QualType ArgTy = Arg->getType(); 7534 if (!ArgTy->hasFloatingRepresentation()) 7535 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7536 << ArgTy; 7537 if (Arg->isLValue()) { 7538 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7539 TheCall->setArg(0, FirstArg.get()); 7540 } 7541 TheCall->setType(TheCall->getArg(0)->getType()); 7542 return false; 7543 } 7544 7545 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7546 // __assume does not evaluate its arguments, and should warn if its argument 7547 // has side effects. 
7548 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7549 Expr *Arg = TheCall->getArg(0); 7550 if (Arg->isInstantiationDependent()) return false; 7551 7552 if (Arg->HasSideEffects(Context)) 7553 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7554 << Arg->getSourceRange() 7555 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7556 7557 return false; 7558 } 7559 7560 /// Handle __builtin_alloca_with_align. This is declared 7561 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7562 /// than 8. 7563 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7564 // The alignment must be a constant integer. 7565 Expr *Arg = TheCall->getArg(1); 7566 7567 // We can't check the value of a dependent argument. 7568 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7569 if (const auto *UE = 7570 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7571 if (UE->getKind() == UETT_AlignOf || 7572 UE->getKind() == UETT_PreferredAlignOf) 7573 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7574 << Arg->getSourceRange(); 7575 7576 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7577 7578 if (!Result.isPowerOf2()) 7579 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7580 << Arg->getSourceRange(); 7581 7582 if (Result < Context.getCharWidth()) 7583 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7584 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7585 7586 if (Result > std::numeric_limits<int32_t>::max()) 7587 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7588 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7589 } 7590 7591 return false; 7592 } 7593 7594 /// Handle __builtin_assume_aligned. This is declared 7595 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();

  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();

    // Alignments beyond the maximum are only a warning, not an error.
    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << Arg->getSourceRange() << Sema::MaximumAlignment;
  }

  // Coerce the optional third argument to size_t.
  if (NumArgs > 2) {
    ExprResult Arg(TheCall->getArg(2));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                 Context.getSizeType(), false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());
  }

  return false;
}

/// Check a call to one of the __builtin_os_log_format family: validates the
/// buffer and format-string arguments, promotes and size-checks the variadic
/// data arguments, and sets the call's result type (size_t for the
/// _buffer_size form, void* for the formatting form).
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  unsigned NumArgs = TheCall->getNumArgs();
  // The size form takes only the format string; the formatting form also
  // takes the destination buffer.
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  // Allow at most 0xff data arguments beyond the required ones.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each data argument must fit in fewer than 0x100 bytes.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression. On success the value is returned in
/// Result; dependent arguments succeed without touching Result.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  // The callee name is only needed for the diagnostic text.
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  Optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}

/// Return true if Value is a non-negative integer whose set bits all fall
/// within a single byte, i.e. a byte value shifted left by a multiple of 8.
static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}

/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  if (IsShiftedByte(Result))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
         << Arg->getSourceRange();
}

/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
/// TheCall is a constant expression representing either a shifted byte value,
/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
                                                   int ArgNum,
                                                   unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  // Check to see if it's in either of the required forms.
  if (IsShiftedByte(Result) ||
      (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_shifted_byte_or_xxff)
         << Arg->getSourceRange();
}

/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg(pointer, mask): returns the pointer's type.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(pointer, imm): immediate must be in [0,15].
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be a constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(pointer, mask): result is int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg / __builtin_arm_stg(pointer): single pointer argument;
  // only ldg produces a (pointer-typed) result.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(a, b): pointer difference; each operand may be a
  // pointer or a null pointer constant, with compatible pointee types.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // On 32-bit ARM, strip the "cp"/"p" coprocessor prefix and the "c"
      // prefixes from the CRn/CRm fields before parsing them as integers.
      ValidString &= Fields[0].startswith_insensitive("cp") ||
                     Fields[0].startswith_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].startswith_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Per-field upper bounds; in the five-field form AArch64 restricts the
    // first field to 0-1, while ARM allows 0-15.
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write builtins,
    // then we require that the argument provided for writing to the register
    // is an integer constant expression. This is because it will be lowered to
    // an MSR (immediate) instruction, so we need to know the immediate at
    // compile time.
    if (TheCall->getNumArgs() != 2)
      return false;

    std::string RegLower = Reg.lower();
    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
        RegLower != "pan" && RegLower != "uao")
      return false;

    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  return false;
}

/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  switch (BuiltinID) {
  default:
    // This function is called in CheckPPCBuiltinFunctionCall where the
    // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here
    // we are isolating the pair vector memop builtins that can be used with mma
    // off so the default case is every builtin that requires mma and paired
    // vector memops.
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10") ||
        SemaFeatureCheck(*this, TheCall, "mma",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  case PPC::BI__builtin_vsx_lxvp:
  case PPC::BI__builtin_vsx_stxvp:
  case PPC::BI__builtin_vsx_assemble_pair:
  case PPC::BI__builtin_vsx_disassemble_pair:
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  }

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  // Note that DecodePPCMMATypeFromStr advances TypeStr past each descriptor.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    // More descriptors than actual arguments: stop here and let the
    // argument-count check at the end produce the diagnostic.
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  // Only a literal value of 1 is supported for the second argument.
  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

namespace {

// Tracks, across the candidate format strings of a call, the highest-indexed
// data argument not covered by any format string, together with the format
// string expressions to cite in the resulting diagnostic.
class UncoveredArgHandler {
  enum { Unknown = -1, AllCovered = -2 };

  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};

// Result categories for checkFormatStringExpr below.
enum StringLiteralCheckType {
  SLCT_NotALiteral,
  SLCT_UncheckedLiteral,
  SLCT_CheckedLiteral
};

} // namespace

// Signed addition/subtraction of Addend into Offset; on overflow the offset
// is widened (doubling the bit width) and the operation retried.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    // Overflowed: double the width and recompute.
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}

namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  int64_t Offset;

public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  // String contents with the first Offset characters dropped.
  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isAscii(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Byte positions are translated by Offset before delegating.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace

static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg,
                              bool IgnoreStringsWithoutSpecifiers);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      bool HasVAListArg, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg,
                      llvm::APSInt Offset,
                      bool IgnoreStringsWithoutSpecifiers = false) {
  if (S.isConstantEvaluated())
    return SLCT_NotALiteral;
tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
Ideally if the format string cannot be null then 8457 // it should have a 'nonnull' attribute in the function prototype. 8458 return SLCT_UncheckedLiteral; 8459 8460 switch (E->getStmtClass()) { 8461 case Stmt::BinaryConditionalOperatorClass: 8462 case Stmt::ConditionalOperatorClass: { 8463 // The expression is a literal if both sub-expressions were, and it was 8464 // completely checked only if both sub-expressions were checked. 8465 const AbstractConditionalOperator *C = 8466 cast<AbstractConditionalOperator>(E); 8467 8468 // Determine whether it is necessary to check both sub-expressions, for 8469 // example, because the condition expression is a constant that can be 8470 // evaluated at compile time. 8471 bool CheckLeft = true, CheckRight = true; 8472 8473 bool Cond; 8474 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8475 S.isConstantEvaluated())) { 8476 if (Cond) 8477 CheckRight = false; 8478 else 8479 CheckLeft = false; 8480 } 8481 8482 // We need to maintain the offsets for the right and the left hand side 8483 // separately to check if every possible indexed expression is a valid 8484 // string literal. They might have different offsets for different string 8485 // literals in the end. 8486 StringLiteralCheckType Left; 8487 if (!CheckLeft) 8488 Left = SLCT_UncheckedLiteral; 8489 else { 8490 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8491 HasVAListArg, format_idx, firstDataArg, 8492 Type, CallType, InFunctionCall, 8493 CheckedVarArgs, UncoveredArg, Offset, 8494 IgnoreStringsWithoutSpecifiers); 8495 if (Left == SLCT_NotALiteral || !CheckRight) { 8496 return Left; 8497 } 8498 } 8499 8500 StringLiteralCheckType Right = checkFormatStringExpr( 8501 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8502 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8503 IgnoreStringsWithoutSpecifiers); 8504 8505 return (CheckLeft && Left < Right) ? 
Left : Right; 8506 } 8507 8508 case Stmt::ImplicitCastExprClass: 8509 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8510 goto tryAgain; 8511 8512 case Stmt::OpaqueValueExprClass: 8513 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8514 E = src; 8515 goto tryAgain; 8516 } 8517 return SLCT_NotALiteral; 8518 8519 case Stmt::PredefinedExprClass: 8520 // While __func__, etc., are technically not string literals, they 8521 // cannot contain format specifiers and thus are not a security 8522 // liability. 8523 return SLCT_UncheckedLiteral; 8524 8525 case Stmt::DeclRefExprClass: { 8526 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8527 8528 // As an exception, do not flag errors for variables binding to 8529 // const string literals. 8530 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8531 bool isConstant = false; 8532 QualType T = DR->getType(); 8533 8534 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8535 isConstant = AT->getElementType().isConstant(S.Context); 8536 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8537 isConstant = T.isConstant(S.Context) && 8538 PT->getPointeeType().isConstant(S.Context); 8539 } else if (T->isObjCObjectPointerType()) { 8540 // In ObjC, there is usually no "const ObjectPointer" type, 8541 // so don't check if the pointee type is constant. 
8542 isConstant = T.isConstant(S.Context); 8543 } 8544 8545 if (isConstant) { 8546 if (const Expr *Init = VD->getAnyInitializer()) { 8547 // Look through initializers like const char c[] = { "foo" } 8548 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8549 if (InitList->isStringLiteralInit()) 8550 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8551 } 8552 return checkFormatStringExpr(S, Init, Args, 8553 HasVAListArg, format_idx, 8554 firstDataArg, Type, CallType, 8555 /*InFunctionCall*/ false, CheckedVarArgs, 8556 UncoveredArg, Offset); 8557 } 8558 } 8559 8560 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8561 // special check to see if the format string is a function parameter 8562 // of the function calling the printf function. If the function 8563 // has an attribute indicating it is a printf-like function, then we 8564 // should suppress warnings concerning non-literals being used in a call 8565 // to a vprintf function. For example: 8566 // 8567 // void 8568 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8569 // va_list ap; 8570 // va_start(ap, fmt); 8571 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8572 // ... 8573 // } 8574 if (HasVAListArg) { 8575 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8576 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8577 int PVIndex = PV->getFunctionScopeIndex() + 1; 8578 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8579 // adjust for implicit parameter 8580 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8581 if (MD->isInstance()) 8582 ++PVIndex; 8583 // We also check if the formats are compatible. 8584 // We can't pass a 'scanf' string to a 'printf' function. 
8585 if (PVIndex == PVFormat->getFormatIdx() && 8586 Type == S.GetFormatStringType(PVFormat)) 8587 return SLCT_UncheckedLiteral; 8588 } 8589 } 8590 } 8591 } 8592 } 8593 8594 return SLCT_NotALiteral; 8595 } 8596 8597 case Stmt::CallExprClass: 8598 case Stmt::CXXMemberCallExprClass: { 8599 const CallExpr *CE = cast<CallExpr>(E); 8600 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8601 bool IsFirst = true; 8602 StringLiteralCheckType CommonResult; 8603 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8604 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8605 StringLiteralCheckType Result = checkFormatStringExpr( 8606 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8607 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8608 IgnoreStringsWithoutSpecifiers); 8609 if (IsFirst) { 8610 CommonResult = Result; 8611 IsFirst = false; 8612 } 8613 } 8614 if (!IsFirst) 8615 return CommonResult; 8616 8617 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8618 unsigned BuiltinID = FD->getBuiltinID(); 8619 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8620 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8621 const Expr *Arg = CE->getArg(0); 8622 return checkFormatStringExpr(S, Arg, Args, 8623 HasVAListArg, format_idx, 8624 firstDataArg, Type, CallType, 8625 InFunctionCall, CheckedVarArgs, 8626 UncoveredArg, Offset, 8627 IgnoreStringsWithoutSpecifiers); 8628 } 8629 } 8630 } 8631 8632 return SLCT_NotALiteral; 8633 } 8634 case Stmt::ObjCMessageExprClass: { 8635 const auto *ME = cast<ObjCMessageExpr>(E); 8636 if (const auto *MD = ME->getMethodDecl()) { 8637 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8638 // As a special case heuristic, if we're using the method -[NSBundle 8639 // localizedStringForKey:value:table:], ignore any key strings that lack 8640 // format specifiers. 
The idea is that if the key doesn't have any 8641 // format specifiers then its probably just a key to map to the 8642 // localized strings. If it does have format specifiers though, then its 8643 // likely that the text of the key is the format string in the 8644 // programmer's language, and should be checked. 8645 const ObjCInterfaceDecl *IFace; 8646 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 8647 IFace->getIdentifier()->isStr("NSBundle") && 8648 MD->getSelector().isKeywordSelector( 8649 {"localizedStringForKey", "value", "table"})) { 8650 IgnoreStringsWithoutSpecifiers = true; 8651 } 8652 8653 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 8654 return checkFormatStringExpr( 8655 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8656 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8657 IgnoreStringsWithoutSpecifiers); 8658 } 8659 } 8660 8661 return SLCT_NotALiteral; 8662 } 8663 case Stmt::ObjCStringLiteralClass: 8664 case Stmt::StringLiteralClass: { 8665 const StringLiteral *StrE = nullptr; 8666 8667 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 8668 StrE = ObjCFExpr->getString(); 8669 else 8670 StrE = cast<StringLiteral>(E); 8671 8672 if (StrE) { 8673 if (Offset.isNegative() || Offset > StrE->getLength()) { 8674 // TODO: It would be better to have an explicit warning for out of 8675 // bounds literals. 8676 return SLCT_NotALiteral; 8677 } 8678 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 8679 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 8680 firstDataArg, Type, InFunctionCall, CallType, 8681 CheckedVarArgs, UncoveredArg, 8682 IgnoreStringsWithoutSpecifiers); 8683 return SLCT_CheckedLiteral; 8684 } 8685 8686 return SLCT_NotALiteral; 8687 } 8688 case Stmt::BinaryOperatorClass: { 8689 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 8690 8691 // A string literal + an int offset is still a string literal. 
8692 if (BinOp->isAdditiveOp()) { 8693 Expr::EvalResult LResult, RResult; 8694 8695 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8696 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8697 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8698 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8699 8700 if (LIsInt != RIsInt) { 8701 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8702 8703 if (LIsInt) { 8704 if (BinOpKind == BO_Add) { 8705 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8706 E = BinOp->getRHS(); 8707 goto tryAgain; 8708 } 8709 } else { 8710 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8711 E = BinOp->getLHS(); 8712 goto tryAgain; 8713 } 8714 } 8715 } 8716 8717 return SLCT_NotALiteral; 8718 } 8719 case Stmt::UnaryOperatorClass: { 8720 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8721 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8722 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8723 Expr::EvalResult IndexResult; 8724 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8725 Expr::SE_NoSideEffects, 8726 S.isConstantEvaluated())) { 8727 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8728 /*RHS is int*/ true); 8729 E = ASE->getBase(); 8730 goto tryAgain; 8731 } 8732 } 8733 8734 return SLCT_NotALiteral; 8735 } 8736 8737 default: 8738 return SLCT_NotALiteral; 8739 } 8740 } 8741 8742 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8743 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8744 .Case("scanf", FST_Scanf) 8745 .Cases("printf", "printf0", FST_Printf) 8746 .Cases("NSString", "CFString", FST_NSString) 8747 .Case("strftime", FST_Strftime) 8748 .Case("strfmon", FST_Strfmon) 8749 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8750 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8751 .Case("os_trace", FST_OSLog) 8752 .Case("os_log", FST_OSLog) 8753 .Default(FST_Unknown); 8754 } 8755 
8756 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8757 /// functions) for correct use of format strings. 8758 /// Returns true if a format string has been fully checked. 8759 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8760 ArrayRef<const Expr *> Args, 8761 bool IsCXXMember, 8762 VariadicCallType CallType, 8763 SourceLocation Loc, SourceRange Range, 8764 llvm::SmallBitVector &CheckedVarArgs) { 8765 FormatStringInfo FSI; 8766 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8767 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8768 FSI.FirstDataArg, GetFormatStringType(Format), 8769 CallType, Loc, Range, CheckedVarArgs); 8770 return false; 8771 } 8772 8773 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8774 bool HasVAListArg, unsigned format_idx, 8775 unsigned firstDataArg, FormatStringType Type, 8776 VariadicCallType CallType, 8777 SourceLocation Loc, SourceRange Range, 8778 llvm::SmallBitVector &CheckedVarArgs) { 8779 // CHECK: printf/scanf-like function is called with no format string. 8780 if (format_idx >= Args.size()) { 8781 Diag(Loc, diag::warn_missing_format_string) << Range; 8782 return false; 8783 } 8784 8785 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8786 8787 // CHECK: format string is not a string literal. 8788 // 8789 // Dynamically generated format strings are difficult to 8790 // automatically vet at compile time. Requiring that format strings 8791 // are string literals: (1) permits the checking of format strings by 8792 // the compiler and thereby (2) can practically remove the source of 8793 // many format string exploits. 8794 8795 // Format string can be either ObjC string (e.g. @"%d") or 8796 // C string (e.g. "%d") 8797 // ObjC string uses the same format specifiers as C string, so we can use 8798 // the same format string checking logic for both ObjC and C strings. 
8799 UncoveredArgHandler UncoveredArg; 8800 StringLiteralCheckType CT = 8801 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8802 format_idx, firstDataArg, Type, CallType, 8803 /*IsFunctionCall*/ true, CheckedVarArgs, 8804 UncoveredArg, 8805 /*no string offset*/ llvm::APSInt(64, false) = 0); 8806 8807 // Generate a diagnostic where an uncovered argument is detected. 8808 if (UncoveredArg.hasUncoveredArg()) { 8809 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8810 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8811 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8812 } 8813 8814 if (CT != SLCT_NotALiteral) 8815 // Literal format string found, check done! 8816 return CT == SLCT_CheckedLiteral; 8817 8818 // Strftime is particular as it always uses a single 'time' argument, 8819 // so it is safe to pass a non-literal string. 8820 if (Type == FST_Strftime) 8821 return false; 8822 8823 // Do not emit diag when the string param is a macro expansion and the 8824 // format is either NSString or CFString. This is a hack to prevent 8825 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8826 // which are usually used in place of NS and CF string literals. 8827 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8828 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8829 return false; 8830 8831 // If there are no arguments specified, warn with -Wformat-security, otherwise 8832 // warn only with -Wformat-nonliteral. 
8833 if (Args.size() == firstDataArg) { 8834 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8835 << OrigFormatExpr->getSourceRange(); 8836 switch (Type) { 8837 default: 8838 break; 8839 case FST_Kprintf: 8840 case FST_FreeBSDKPrintf: 8841 case FST_Printf: 8842 Diag(FormatLoc, diag::note_format_security_fixit) 8843 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8844 break; 8845 case FST_NSString: 8846 Diag(FormatLoc, diag::note_format_security_fixit) 8847 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8848 break; 8849 } 8850 } else { 8851 Diag(FormatLoc, diag::warn_format_nonliteral) 8852 << OrigFormatExpr->getSourceRange(); 8853 } 8854 return false; 8855 } 8856 8857 namespace { 8858 8859 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8860 protected: 8861 Sema &S; 8862 const FormatStringLiteral *FExpr; 8863 const Expr *OrigFormatExpr; 8864 const Sema::FormatStringType FSType; 8865 const unsigned FirstDataArg; 8866 const unsigned NumDataArgs; 8867 const char *Beg; // Start of format string. 
8868 const bool HasVAListArg; 8869 ArrayRef<const Expr *> Args; 8870 unsigned FormatIdx; 8871 llvm::SmallBitVector CoveredArgs; 8872 bool usesPositionalArgs = false; 8873 bool atFirstArg = true; 8874 bool inFunctionCall; 8875 Sema::VariadicCallType CallType; 8876 llvm::SmallBitVector &CheckedVarArgs; 8877 UncoveredArgHandler &UncoveredArg; 8878 8879 public: 8880 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8881 const Expr *origFormatExpr, 8882 const Sema::FormatStringType type, unsigned firstDataArg, 8883 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8884 ArrayRef<const Expr *> Args, unsigned formatIdx, 8885 bool inFunctionCall, Sema::VariadicCallType callType, 8886 llvm::SmallBitVector &CheckedVarArgs, 8887 UncoveredArgHandler &UncoveredArg) 8888 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8889 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8890 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8891 inFunctionCall(inFunctionCall), CallType(callType), 8892 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8893 CoveredArgs.resize(numDataArgs); 8894 CoveredArgs.reset(); 8895 } 8896 8897 void DoneProcessing(); 8898 8899 void HandleIncompleteSpecifier(const char *startSpecifier, 8900 unsigned specifierLen) override; 8901 8902 void HandleInvalidLengthModifier( 8903 const analyze_format_string::FormatSpecifier &FS, 8904 const analyze_format_string::ConversionSpecifier &CS, 8905 const char *startSpecifier, unsigned specifierLen, 8906 unsigned DiagID); 8907 8908 void HandleNonStandardLengthModifier( 8909 const analyze_format_string::FormatSpecifier &FS, 8910 const char *startSpecifier, unsigned specifierLen); 8911 8912 void HandleNonStandardConversionSpecifier( 8913 const analyze_format_string::ConversionSpecifier &CS, 8914 const char *startSpecifier, unsigned specifierLen); 8915 8916 void HandlePosition(const char *startPos, unsigned posLen) override; 8917 8918 void 
HandleInvalidPosition(const char *startSpecifier, 8919 unsigned specifierLen, 8920 analyze_format_string::PositionContext p) override; 8921 8922 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8923 8924 void HandleNullChar(const char *nullCharacter) override; 8925 8926 template <typename Range> 8927 static void 8928 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8929 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8930 bool IsStringLocation, Range StringRange, 8931 ArrayRef<FixItHint> Fixit = None); 8932 8933 protected: 8934 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8935 const char *startSpec, 8936 unsigned specifierLen, 8937 const char *csStart, unsigned csLen); 8938 8939 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8940 const char *startSpec, 8941 unsigned specifierLen); 8942 8943 SourceRange getFormatStringRange(); 8944 CharSourceRange getSpecifierRange(const char *startSpecifier, 8945 unsigned specifierLen); 8946 SourceLocation getLocationOfByte(const char *x); 8947 8948 const Expr *getDataArg(unsigned i) const; 8949 8950 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8951 const analyze_format_string::ConversionSpecifier &CS, 8952 const char *startSpecifier, unsigned specifierLen, 8953 unsigned argIndex); 8954 8955 template <typename Range> 8956 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8957 bool IsStringLocation, Range StringRange, 8958 ArrayRef<FixItHint> Fixit = None); 8959 }; 8960 8961 } // namespace 8962 8963 SourceRange CheckFormatHandler::getFormatStringRange() { 8964 return OrigFormatExpr->getSourceRange(); 8965 } 8966 8967 CharSourceRange CheckFormatHandler:: 8968 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8969 SourceLocation Start = getLocationOfByte(startSpecifier); 8970 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8971 8972 
// Advance the end SourceLocation by one due to half-open ranges. 8973 End = End.getLocWithOffset(1); 8974 8975 return CharSourceRange::getCharRange(Start, End); 8976 } 8977 8978 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8979 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8980 S.getLangOpts(), S.Context.getTargetInfo()); 8981 } 8982 8983 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8984 unsigned specifierLen){ 8985 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8986 getLocationOfByte(startSpecifier), 8987 /*IsStringLocation*/true, 8988 getSpecifierRange(startSpecifier, specifierLen)); 8989 } 8990 8991 void CheckFormatHandler::HandleInvalidLengthModifier( 8992 const analyze_format_string::FormatSpecifier &FS, 8993 const analyze_format_string::ConversionSpecifier &CS, 8994 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8995 using namespace analyze_format_string; 8996 8997 const LengthModifier &LM = FS.getLengthModifier(); 8998 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8999 9000 // See if we know how to fix this length modifier. 
9001 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9002 if (FixedLM) { 9003 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9004 getLocationOfByte(LM.getStart()), 9005 /*IsStringLocation*/true, 9006 getSpecifierRange(startSpecifier, specifierLen)); 9007 9008 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9009 << FixedLM->toString() 9010 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9011 9012 } else { 9013 FixItHint Hint; 9014 if (DiagID == diag::warn_format_nonsensical_length) 9015 Hint = FixItHint::CreateRemoval(LMRange); 9016 9017 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 9018 getLocationOfByte(LM.getStart()), 9019 /*IsStringLocation*/true, 9020 getSpecifierRange(startSpecifier, specifierLen), 9021 Hint); 9022 } 9023 } 9024 9025 void CheckFormatHandler::HandleNonStandardLengthModifier( 9026 const analyze_format_string::FormatSpecifier &FS, 9027 const char *startSpecifier, unsigned specifierLen) { 9028 using namespace analyze_format_string; 9029 9030 const LengthModifier &LM = FS.getLengthModifier(); 9031 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 9032 9033 // See if we know how to fix this length modifier. 
9034 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 9035 if (FixedLM) { 9036 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9037 << LM.toString() << 0, 9038 getLocationOfByte(LM.getStart()), 9039 /*IsStringLocation*/true, 9040 getSpecifierRange(startSpecifier, specifierLen)); 9041 9042 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 9043 << FixedLM->toString() 9044 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 9045 9046 } else { 9047 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9048 << LM.toString() << 0, 9049 getLocationOfByte(LM.getStart()), 9050 /*IsStringLocation*/true, 9051 getSpecifierRange(startSpecifier, specifierLen)); 9052 } 9053 } 9054 9055 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 9056 const analyze_format_string::ConversionSpecifier &CS, 9057 const char *startSpecifier, unsigned specifierLen) { 9058 using namespace analyze_format_string; 9059 9060 // See if we know how to fix this conversion specifier. 
9061 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 9062 if (FixedCS) { 9063 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9064 << CS.toString() << /*conversion specifier*/1, 9065 getLocationOfByte(CS.getStart()), 9066 /*IsStringLocation*/true, 9067 getSpecifierRange(startSpecifier, specifierLen)); 9068 9069 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 9070 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 9071 << FixedCS->toString() 9072 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 9073 } else { 9074 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 9075 << CS.toString() << /*conversion specifier*/1, 9076 getLocationOfByte(CS.getStart()), 9077 /*IsStringLocation*/true, 9078 getSpecifierRange(startSpecifier, specifierLen)); 9079 } 9080 } 9081 9082 void CheckFormatHandler::HandlePosition(const char *startPos, 9083 unsigned posLen) { 9084 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 9085 getLocationOfByte(startPos), 9086 /*IsStringLocation*/true, 9087 getSpecifierRange(startPos, posLen)); 9088 } 9089 9090 void 9091 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 9092 analyze_format_string::PositionContext p) { 9093 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 9094 << (unsigned) p, 9095 getLocationOfByte(startPos), /*IsStringLocation*/true, 9096 getSpecifierRange(startPos, posLen)); 9097 } 9098 9099 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 9100 unsigned posLen) { 9101 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 9102 getLocationOfByte(startPos), 9103 /*IsStringLocation*/true, 9104 getSpecifierRange(startPos, posLen)); 9105 } 9106 9107 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 9108 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 9109 // The presence of a null character is 
likely an error. 9110 EmitFormatDiagnostic( 9111 S.PDiag(diag::warn_printf_format_string_contains_null_char), 9112 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 9113 getFormatStringRange()); 9114 } 9115 } 9116 9117 // Note that this may return NULL if there was an error parsing or building 9118 // one of the argument expressions. 9119 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 9120 return Args[FirstDataArg + i]; 9121 } 9122 9123 void CheckFormatHandler::DoneProcessing() { 9124 // Does the number of data arguments exceed the number of 9125 // format conversions in the format string? 9126 if (!HasVAListArg) { 9127 // Find any arguments that weren't covered. 9128 CoveredArgs.flip(); 9129 signed notCoveredArg = CoveredArgs.find_first(); 9130 if (notCoveredArg >= 0) { 9131 assert((unsigned)notCoveredArg < NumDataArgs); 9132 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 9133 } else { 9134 UncoveredArg.setAllCovered(); 9135 } 9136 } 9137 } 9138 9139 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 9140 const Expr *ArgExpr) { 9141 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 9142 "Invalid state"); 9143 9144 if (!ArgExpr) 9145 return; 9146 9147 SourceLocation Loc = ArgExpr->getBeginLoc(); 9148 9149 if (S.getSourceManager().isInSystemMacro(Loc)) 9150 return; 9151 9152 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 9153 for (auto E : DiagnosticExprs) 9154 PDiag << E->getSourceRange(); 9155 9156 CheckFormatHandler::EmitFormatDiagnostic( 9157 S, IsFunctionCall, DiagnosticExprs[0], 9158 PDiag, Loc, /*IsStringLocation*/false, 9159 DiagnosticExprs[0]->getSourceRange()); 9160 } 9161 9162 bool 9163 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 9164 SourceLocation Loc, 9165 const char *startSpec, 9166 unsigned specifierLen, 9167 const char *csStart, 9168 unsigned csLen) { 9169 bool keepGoing = true; 9170 if (argIndex < NumDataArgs) { 9171 // Consider the argument 
coverered, even though the specifier doesn't 9172 // make sense. 9173 CoveredArgs.set(argIndex); 9174 } 9175 else { 9176 // If argIndex exceeds the number of data arguments we 9177 // don't issue a warning because that is just a cascade of warnings (and 9178 // they may have intended '%%' anyway). We don't want to continue processing 9179 // the format string after this point, however, as we will like just get 9180 // gibberish when trying to match arguments. 9181 keepGoing = false; 9182 } 9183 9184 StringRef Specifier(csStart, csLen); 9185 9186 // If the specifier in non-printable, it could be the first byte of a UTF-8 9187 // sequence. In that case, print the UTF-8 code point. If not, print the byte 9188 // hex value. 9189 std::string CodePointStr; 9190 if (!llvm::sys::locale::isPrint(*csStart)) { 9191 llvm::UTF32 CodePoint; 9192 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 9193 const llvm::UTF8 *E = 9194 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 9195 llvm::ConversionResult Result = 9196 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 9197 9198 if (Result != llvm::conversionOK) { 9199 unsigned char FirstChar = *csStart; 9200 CodePoint = (llvm::UTF32)FirstChar; 9201 } 9202 9203 llvm::raw_string_ostream OS(CodePointStr); 9204 if (CodePoint < 256) 9205 OS << "\\x" << llvm::format("%02x", CodePoint); 9206 else if (CodePoint <= 0xFFFF) 9207 OS << "\\u" << llvm::format("%04x", CodePoint); 9208 else 9209 OS << "\\U" << llvm::format("%08x", CodePoint); 9210 OS.flush(); 9211 Specifier = CodePointStr; 9212 } 9213 9214 EmitFormatDiagnostic( 9215 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 9216 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 9217 9218 return keepGoing; 9219 } 9220 9221 void 9222 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 9223 const char *startSpec, 9224 unsigned specifierLen) { 9225 EmitFormatDiagnostic( 9226 
S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 9227 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 9228 } 9229 9230 bool 9231 CheckFormatHandler::CheckNumArgs( 9232 const analyze_format_string::FormatSpecifier &FS, 9233 const analyze_format_string::ConversionSpecifier &CS, 9234 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 9235 9236 if (argIndex >= NumDataArgs) { 9237 PartialDiagnostic PDiag = FS.usesPositionalArg() 9238 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 9239 << (argIndex+1) << NumDataArgs) 9240 : S.PDiag(diag::warn_printf_insufficient_data_args); 9241 EmitFormatDiagnostic( 9242 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 9243 getSpecifierRange(startSpecifier, specifierLen)); 9244 9245 // Since more arguments than conversion tokens are given, by extension 9246 // all arguments are covered, so mark this as so. 9247 UncoveredArg.setAllCovered(); 9248 return false; 9249 } 9250 return true; 9251 } 9252 9253 template<typename Range> 9254 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 9255 SourceLocation Loc, 9256 bool IsStringLocation, 9257 Range StringRange, 9258 ArrayRef<FixItHint> FixIt) { 9259 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 9260 Loc, IsStringLocation, StringRange, FixIt); 9261 } 9262 9263 /// If the format string is not within the function call, emit a note 9264 /// so that the function call and string are in diagnostic messages. 9265 /// 9266 /// \param InFunctionCall if true, the format string is within the function 9267 /// call and only one diagnostic message will be produced. Otherwise, an 9268 /// extra note will be emitted pointing to location of the format string. 9269 /// 9270 /// \param ArgumentExpr the expression that is passed as the format string 9271 /// argument in the function call. Used for getting locations when two 9272 /// diagnostics are emitted. 
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string should be
/// used for the note. Otherwise, Loc points to the argument list and will
/// be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // Single diagnostic: attach the highlight range and fixits directly.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    // Primary diagnostic points at the call (or the given Loc)...
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();

    // ...and a note points into the separately-defined format string.
    const Sema::SemaDiagnosticBuilder &Note =
      S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
             diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

/// Callback handler that checks printf-family format strings (printf,
/// NSString formats, os_log, os_trace). The format-string parser invokes
/// the Handle* overrides as it walks the literal.
class CheckPrintfHandler : public CheckFormatHandler {
public:
  // NOTE(review): the 'isObjC' parameter is neither used nor forwarded to
  // the base class; ObjC-ness is derived from FSType (see isObjCContext).
  // Confirm whether the parameter can be dropped at the call sites.
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     bool hasVAListArg, ArrayRef<const Expr *> Args,
                     unsigned formatIdx, bool inFunctionCall,
                     Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  /// True when checking an NSString-style format string.
  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  // Delegate to the shared handler, which claims the argument (if any) and
  // emits the "invalid conversion specifier" warning.
  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

/// Emit an error for a vector modifier whose mask type size is invalid.
void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}

/// Type-check the data argument consumed by a '*' amount — a field width
/// (\p k == 0) or a precision (\p k == 1). Returns false when checking of
/// this specifier should stop (argument missing or wrongly typed).
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt,
    unsigned k, const char *startSpecifier,
    unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (!HasVAListArg) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                               << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      if (!Arg)
        return false;

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                               << k << AT.getRepresentativeTypeName(S.Context)
                               << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}

/// Diagnose a field width (\p type == 0) or precision (\p type == 1) that
/// makes no sense for the given conversion specifier; offers a removal
/// fixit when the amount is a literal constant.
void CheckPrintfHandler::HandleInvalidAmount(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const analyze_printf::OptionalAmount &Amt,
                                      unsigned type,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  // Only a constant amount has a literal range we can remove; a '*' amount
  // gets no fixit (removing it would also orphan its data argument).
  FixItHint fixit =
    Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
      ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
          Amt.getConstantLength()))
      : FixItHint();

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                         << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       fixit);
}

void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                                    const analyze_printf::OptionalFlag &flag,
                                    const char *startSpecifier,
                                    unsigned specifierLen) {
  // Warn about pointless flag with a fixit removal.
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
                         << flag.toString() << CS.toString(),
                       getLocationOfByte(flag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(flag.getPosition(), 1)));
}

/// Diagnose \p ignoredFlag, which is rendered meaningless by \p flag
/// (e.g. '0' ignored when '-' is present); offers a fixit removing it.
void CheckPrintfHandler::HandleIgnoredFlag(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const analyze_printf::OptionalFlag &ignoredFlag,
                                      const analyze_printf::OptionalFlag &flag,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  // Warn about ignored flag with a fixit removal.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
                         << ignoredFlag.toString() << flag.toString(),
                       getLocationOfByte(ignoredFlag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(ignoredFlag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
                                                     unsigned flagLen) {
  // Warn about an empty flag.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startFlag, flagLen));
}

void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
                                                       unsigned flagLen) {
  // Warn about an invalid flag, with a fixit removing the whole flag range.
  auto Range = getSpecifierRange(startFlag, flagLen);
  StringRef flag(startFlag, flagLen);
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
                      getLocationOfByte(startFlag),
                      /*IsStringLocation*/true,
                      Range, FixItHint::CreateRemoval(Range));
}

void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
    const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
    // Warn about using '[...]' without a '@' conversion.
    auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
    auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
    EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
                         getLocationOfByte(conversionPosition),
                         /*IsStringLocation*/true,
                         Range, FixItHint::CreateRemoval(Range));
}

// Determines if the specified type is a C++ class or struct containing
// a member with the specified name and kind (e.g. a CXXMethodDecl named
// "c_str()"). Returns the (possibly empty) set of matching members.
template<typename MemberKind>
static llvm::SmallPtrSet<MemberKind*, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  llvm::SmallPtrSet<MemberKind*, 1> Results;

  // Only complete C++ record types can be searched for members.
  if (!RT)
    return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD || !RD->getDefinition())
    return Results;

  LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
                 Sema::LookupMemberName);
  R.suppressDiagnostics();

  // We just need to include all members of the right kind turned up by the
  // filter, at this point.
  if (S.LookupQualifiedName(R, RT->getDecl()))
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
      NamedDecl *decl = (*I)->getUnderlyingDecl();
      if (MemberKind *FK = dyn_cast<MemberKind>(decl))
        Results.insert(FK);
    }
  return Results;
}

/// Check if we could call '.c_str()' on an object.
///
/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
/// allow the call, or if it would be ambiguous).
bool Sema::hasCStrMethod(const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
  // Any c_str() overload callable with zero arguments will do.
  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI)
    if ((*MI)->getMinRequiredArguments() == 0)
      return true;
  return false;
}

// Check if a (w)string was passed when a (w)char* was needed, and offer a
// better diagnostic if so. AT is assumed to be valid.
// Returns true when a c_str() conversion method is found.
bool CheckPrintfHandler::checkForCStrMembers(
    const analyze_printf::ArgType &AT, const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());

  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI) {
    const CXXMethodDecl *Method = *MI;
    // Suggest only a zero-argument c_str() whose return type satisfies the
    // conversion specifier.
    if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
      // FIXME: Suggest parens if the expression needs them.
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
      S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
          << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
      return true;
    }
  }

  return false;
}

/// Validate a single printf conversion specification: positional/
/// non-positional consistency, '*' width and precision arguments,
/// dialect-specific specifiers (FreeBSD kernel %b/%D, ObjC %@,
/// os_log/os_trace restrictions), flags, length modifiers, and finally the
/// type of the matching data argument. Returns false to stop checking the
/// rest of the format string.
bool CheckPrintfHandler::HandlePrintfSpecifier(
    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen, const TargetInfo &Target) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
        atFirstArg = false;
        usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      // Mixing '%1$s'-style and '%s'-style specifiers is an error.
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // %b and %D consume two data arguments each.
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
    const analyze_printf::ArgType &AT =
      (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
        ArgType(S.Context.IntTy) : ArgType::CPointerTy;
    if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  // NOTE(review): here (and in the annotation/%n checks below) the location
  // is a byte within the format string, yet IsStringLocation is passed as
  // false — unlike e.g. CheckNumArgs, which passes true for such locations.
  // Confirm whether these should be /*IsStringLocation*/ true.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  // %n is unsupported (fortified away) on Android and Fuchsia.
  const llvm::Triple &Triple = Target.getTriple();
  if (CS.getKind() == ConversionSpecifier::nArg &&
      (Triple.isAndroid() || Triple.isOSFuchsia())) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
                        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
                        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}

/// Return true if inserting a C-style cast in front of \p E would require
/// wrapping E in parentheses to preserve meaning.
static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
  const Expr *Inside = E->IgnoreImpCasts();
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
    Inside = POE->getSyntacticForm()->IgnoreImpCasts();

  switch (Inside->getStmtClass()) {
  // These expression kinds can take a prefixed cast without parentheses.
  case Stmt::ArraySubscriptExprClass:
  case Stmt::CallExprClass:
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::DeclRefExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::MemberExprClass:
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCBoolLiteralExprClass:
  case Stmt::ObjCBoxedExprClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCEncodeExprClass:
  case Stmt::ObjCIvarRefExprClass:
  case Stmt::ObjCMessageExprClass:
  case Stmt::ObjCPropertyRefExprClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCSubscriptRefExprClass:
  case Stmt::ParenExprClass:
  case Stmt::StringLiteralClass:
  case Stmt::UnaryOperatorClass:
    return false;
  default:
    // Conservatively require parens for anything else.
    return true;
  }
}

/// Detect platform-independence typedefs (NSInteger, NSUInteger, CFIndex,
/// SInt32, UInt32 — used at the Darwin-only call site below) in
/// \p IntendedTy or in \p E's sub-expressions. Returns the concrete type
/// the value should be cast to before printing plus the typedef's name, or
/// a null QualType when the value can be printed directly.
static std::pair<QualType, StringRef>
shouldNotPrintDirectly(const ASTContext &Context,
                       QualType IntendedTy,
                       const Expr *E) {
  // Use a 'while' to peel off layers of typedefs.
  QualType TyTy = IntendedTy;
  while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
    StringRef Name = UserTy->getDecl()->getName();
    QualType CastTy = llvm::StringSwitch<QualType>(Name)
      .Case("CFIndex", Context.getNSIntegerType())
      .Case("NSInteger", Context.getNSIntegerType())
      .Case("NSUInteger", Context.getNSUIntegerType())
      .Case("SInt32", Context.IntTy)
      .Case("UInt32", Context.UnsignedIntTy)
      .Default(QualType());

    if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);

    TyTy = UserTy->desugar();
  }

  // Strip parens if necessary.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there. Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
      shouldNotPrintDirectly(Context,
                             CO->getTrueExpr()->getType(),
                             CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
      shouldNotPrintDirectly(Context,
                             CO->getFalseExpr()->getType(),
                             CO->getFalseExpr());

    // Use whichever arm produced a result; if both did and agree, use it.
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}

/// Return true if \p ICE is an implicit argument promotion of an arithmetic
/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
/// type do not count.
static bool
isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
  QualType From = ICE->getSubExpr()->getType();
  QualType To = ICE->getType();
  // It's an integer promotion if the destination type is the promoted
  // source type.
  if (ICE->getCastKind() == CK_IntegralCast &&
      From->isPromotableIntegerType() &&
      S.Context.getPromotedIntegerType(From) == To)
    return true;
  // Look through vector types, since we do default argument promotion for
  // those in OpenCL.
9929 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9930 From = VecTy->getElementType(); 9931 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9932 To = VecTy->getElementType(); 9933 // It's a floating promotion if the source type is a lower rank. 9934 return ICE->getCastKind() == CK_FloatingCast && 9935 S.Context.getFloatingTypeOrder(From, To) < 0; 9936 } 9937 9938 bool 9939 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9940 const char *StartSpecifier, 9941 unsigned SpecifierLen, 9942 const Expr *E) { 9943 using namespace analyze_format_string; 9944 using namespace analyze_printf; 9945 9946 // Now type check the data expression that matches the 9947 // format specifier. 9948 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9949 if (!AT.isValid()) 9950 return true; 9951 9952 QualType ExprTy = E->getType(); 9953 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9954 ExprTy = TET->getUnderlyingExpr()->getType(); 9955 } 9956 9957 // Diagnose attempts to print a boolean value as a character. Unlike other 9958 // -Wformat diagnostics, this is fine from a type perspective, but it still 9959 // doesn't make sense. 9960 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9961 E->isKnownToHaveBooleanValue()) { 9962 const CharSourceRange &CSR = 9963 getSpecifierRange(StartSpecifier, SpecifierLen); 9964 SmallString<4> FSString; 9965 llvm::raw_svector_ostream os(FSString); 9966 FS.toString(os); 9967 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9968 << FSString, 9969 E->getExprLoc(), false, CSR); 9970 return true; 9971 } 9972 9973 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9974 if (Match == analyze_printf::ArgType::Match) 9975 return true; 9976 9977 // Look through argument promotions for our error message's reported type. 
9978 // This includes the integral and floating promotions, but excludes array 9979 // and function pointer decay (seeing that an argument intended to be a 9980 // string has type 'char [6]' is probably more confusing than 'char *') and 9981 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9982 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9983 if (isArithmeticArgumentPromotion(S, ICE)) { 9984 E = ICE->getSubExpr(); 9985 ExprTy = E->getType(); 9986 9987 // Check if we didn't match because of an implicit cast from a 'char' 9988 // or 'short' to an 'int'. This is done because printf is a varargs 9989 // function. 9990 if (ICE->getType() == S.Context.IntTy || 9991 ICE->getType() == S.Context.UnsignedIntTy) { 9992 // All further checking is done on the subexpression 9993 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9994 AT.matchesType(S.Context, ExprTy); 9995 if (ImplicitMatch == analyze_printf::ArgType::Match) 9996 return true; 9997 if (ImplicitMatch == ArgType::NoMatchPedantic || 9998 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9999 Match = ImplicitMatch; 10000 } 10001 } 10002 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 10003 // Special case for 'a', which has type 'int' in C. 10004 // Note, however, that we do /not/ want to treat multibyte constants like 10005 // 'MooV' as characters! This form is deprecated but still exists. In 10006 // addition, don't treat expressions as of type 'char' if one byte length 10007 // modifier is provided. 10008 if (ExprTy == S.Context.IntTy && 10009 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 10010 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 10011 ExprTy = S.Context.CharTy; 10012 } 10013 10014 // Look through enums to their underlying type. 
10015 bool IsEnum = false; 10016 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 10017 ExprTy = EnumTy->getDecl()->getIntegerType(); 10018 IsEnum = true; 10019 } 10020 10021 // %C in an Objective-C context prints a unichar, not a wchar_t. 10022 // If the argument is an integer of some kind, believe the %C and suggest 10023 // a cast instead of changing the conversion specifier. 10024 QualType IntendedTy = ExprTy; 10025 if (isObjCContext() && 10026 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 10027 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 10028 !ExprTy->isCharType()) { 10029 // 'unichar' is defined as a typedef of unsigned short, but we should 10030 // prefer using the typedef if it is visible. 10031 IntendedTy = S.Context.UnsignedShortTy; 10032 10033 // While we are here, check if the value is an IntegerLiteral that happens 10034 // to be within the valid range. 10035 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 10036 const llvm::APInt &V = IL->getValue(); 10037 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 10038 return true; 10039 } 10040 10041 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 10042 Sema::LookupOrdinaryName); 10043 if (S.LookupName(Result, S.getCurScope())) { 10044 NamedDecl *ND = Result.getFoundDecl(); 10045 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 10046 if (TD->getUnderlyingType() == IntendedTy) 10047 IntendedTy = S.Context.getTypedefType(TD); 10048 } 10049 } 10050 } 10051 10052 // Special-case some of Darwin's platform-independence types by suggesting 10053 // casts to primitive types that are known to be large enough. 
10054 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 10055 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 10056 QualType CastTy; 10057 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 10058 if (!CastTy.isNull()) { 10059 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 10060 // (long in ASTContext). Only complain to pedants. 10061 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 10062 (AT.isSizeT() || AT.isPtrdiffT()) && 10063 AT.matchesType(S.Context, CastTy)) 10064 Match = ArgType::NoMatchPedantic; 10065 IntendedTy = CastTy; 10066 ShouldNotPrintDirectly = true; 10067 } 10068 } 10069 10070 // We may be able to offer a FixItHint if it is a supported type. 10071 PrintfSpecifier fixedFS = FS; 10072 bool Success = 10073 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 10074 10075 if (Success) { 10076 // Get the fix string from the fixed format specifier 10077 SmallString<16> buf; 10078 llvm::raw_svector_ostream os(buf); 10079 fixedFS.toString(os); 10080 10081 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 10082 10083 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 10084 unsigned Diag; 10085 switch (Match) { 10086 case ArgType::Match: llvm_unreachable("expected non-matching"); 10087 case ArgType::NoMatchPedantic: 10088 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10089 break; 10090 case ArgType::NoMatchTypeConfusion: 10091 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10092 break; 10093 case ArgType::NoMatch: 10094 Diag = diag::warn_format_conversion_argument_type_mismatch; 10095 break; 10096 } 10097 10098 // In this case, the specifier is wrong and should be changed to match 10099 // the argument. 
10100 EmitFormatDiagnostic(S.PDiag(Diag) 10101 << AT.getRepresentativeTypeName(S.Context) 10102 << IntendedTy << IsEnum << E->getSourceRange(), 10103 E->getBeginLoc(), 10104 /*IsStringLocation*/ false, SpecRange, 10105 FixItHint::CreateReplacement(SpecRange, os.str())); 10106 } else { 10107 // The canonical type for formatting this value is different from the 10108 // actual type of the expression. (This occurs, for example, with Darwin's 10109 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 10110 // should be printed as 'long' for 64-bit compatibility.) 10111 // Rather than emitting a normal format/argument mismatch, we want to 10112 // add a cast to the recommended type (and correct the format string 10113 // if necessary). 10114 SmallString<16> CastBuf; 10115 llvm::raw_svector_ostream CastFix(CastBuf); 10116 CastFix << "("; 10117 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 10118 CastFix << ")"; 10119 10120 SmallVector<FixItHint,4> Hints; 10121 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 10122 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 10123 10124 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 10125 // If there's already a cast present, just replace it. 10126 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 10127 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 10128 10129 } else if (!requiresParensToAddCast(E)) { 10130 // If the expression has high enough precedence, 10131 // just write the C-style cast. 10132 Hints.push_back( 10133 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10134 } else { 10135 // Otherwise, add parens around the expression as well as the cast. 
10136 CastFix << "("; 10137 Hints.push_back( 10138 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 10139 10140 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 10141 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 10142 } 10143 10144 if (ShouldNotPrintDirectly) { 10145 // The expression has a type that should not be printed directly. 10146 // We extract the name from the typedef because we don't want to show 10147 // the underlying type in the diagnostic. 10148 StringRef Name; 10149 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 10150 Name = TypedefTy->getDecl()->getName(); 10151 else 10152 Name = CastTyName; 10153 unsigned Diag = Match == ArgType::NoMatchPedantic 10154 ? diag::warn_format_argument_needs_cast_pedantic 10155 : diag::warn_format_argument_needs_cast; 10156 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 10157 << E->getSourceRange(), 10158 E->getBeginLoc(), /*IsStringLocation=*/false, 10159 SpecRange, Hints); 10160 } else { 10161 // In this case, the expression could be printed using a different 10162 // specifier, but we've decided that the specifier is probably correct 10163 // and we should cast instead. Just use the normal warning message. 10164 EmitFormatDiagnostic( 10165 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 10166 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 10167 << E->getSourceRange(), 10168 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 10169 } 10170 } 10171 } else { 10172 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 10173 SpecifierLen); 10174 // Since the warning for passing non-POD types to variadic functions 10175 // was deferred until now, we emit a warning for non-POD 10176 // arguments here. 
10177 switch (S.isValidVarArgType(ExprTy)) { 10178 case Sema::VAK_Valid: 10179 case Sema::VAK_ValidInCXX11: { 10180 unsigned Diag; 10181 switch (Match) { 10182 case ArgType::Match: llvm_unreachable("expected non-matching"); 10183 case ArgType::NoMatchPedantic: 10184 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 10185 break; 10186 case ArgType::NoMatchTypeConfusion: 10187 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 10188 break; 10189 case ArgType::NoMatch: 10190 Diag = diag::warn_format_conversion_argument_type_mismatch; 10191 break; 10192 } 10193 10194 EmitFormatDiagnostic( 10195 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 10196 << IsEnum << CSR << E->getSourceRange(), 10197 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10198 break; 10199 } 10200 case Sema::VAK_Undefined: 10201 case Sema::VAK_MSVCUndefined: 10202 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 10203 << S.getLangOpts().CPlusPlus11 << ExprTy 10204 << CallType 10205 << AT.getRepresentativeTypeName(S.Context) << CSR 10206 << E->getSourceRange(), 10207 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10208 checkForCStrMembers(AT, E); 10209 break; 10210 10211 case Sema::VAK_Invalid: 10212 if (ExprTy->isObjCObjectType()) 10213 EmitFormatDiagnostic( 10214 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 10215 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 10216 << AT.getRepresentativeTypeName(S.Context) << CSR 10217 << E->getSourceRange(), 10218 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 10219 else 10220 // FIXME: If this is an initializer list, suggest removing the braces 10221 // or inserting a cast to the target type. 
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    // Record that this data argument was covered by a specifier so the
    // later uncovered-argument check does not flag it again.
    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

/// Format-string handler for scanf-family functions: visits each conversion
/// specifier produced by the scanf format-string parser and checks it against
/// the corresponding call argument.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, bool hasVAListArg,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  /// Called for each syntactically valid conversion specifier.
  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  /// Called when the conversion character is not a valid scanf conversion.
  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  /// Called for an incomplete scanlist in the format string.
  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

// Diagnose an incomplete '%[' scanlist; [start, end) delimits the bytes of
// the offending portion of the format string.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end -
                                         start));
}

// An unknown scanf conversion character; defer to the shared
// CheckFormatHandler diagnostic for invalid specifiers.
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

// Check one scanf conversion specifier: positional-argument consistency,
// zero field width, length-modifier validity, and finally (when the call's
// arguments are available) the type of the matching argument.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument.  These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      // Mixing positional ("%1$d") and non-positional ("%d") specifiers in
      // one format string is diagnosed; stop processing this string.
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.  A zero width never matches any
  // input, so suggest removing it with a fix-it.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  // If the expected argument type could not be determined, there is nothing
  // to compare against; accept the specifier.
  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // Try to compute a corrected specifier so the mismatch warning can carry a
  // fix-it replacing the conversion specification.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    // No fix is known; emit the same warning without a fix-it.
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}

// Check a single format-string literal against the call's data arguments,
// dispatching to the printf- or scanf-style handler based on Type.
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg,
                              bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string.  NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Clamp the checked length to the declared array size minus the implicit
  // terminator (never less than zero elements).
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  // Callers may ask to skip strings that contain no conversion specifiers at
  // all (nothing useful to diagnose).
  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                                  Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

// Returns true if the given string literal contains a %s-style conversion;
// used to flag format strings that can read arbitrary memory.
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string.  NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
10499 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 10500 assert(T && "String literal not of constant array type!"); 10501 size_t TypeSize = T->getSize().getZExtValue(); 10502 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10503 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 10504 getLangOpts(), 10505 Context.getTargetInfo()); 10506 } 10507 10508 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 10509 10510 // Returns the related absolute value function that is larger, of 0 if one 10511 // does not exist. 10512 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10513 switch (AbsFunction) { 10514 default: 10515 return 0; 10516 10517 case Builtin::BI__builtin_abs: 10518 return Builtin::BI__builtin_labs; 10519 case Builtin::BI__builtin_labs: 10520 return Builtin::BI__builtin_llabs; 10521 case Builtin::BI__builtin_llabs: 10522 return 0; 10523 10524 case Builtin::BI__builtin_fabsf: 10525 return Builtin::BI__builtin_fabs; 10526 case Builtin::BI__builtin_fabs: 10527 return Builtin::BI__builtin_fabsl; 10528 case Builtin::BI__builtin_fabsl: 10529 return 0; 10530 10531 case Builtin::BI__builtin_cabsf: 10532 return Builtin::BI__builtin_cabs; 10533 case Builtin::BI__builtin_cabs: 10534 return Builtin::BI__builtin_cabsl; 10535 case Builtin::BI__builtin_cabsl: 10536 return 0; 10537 10538 case Builtin::BIabs: 10539 return Builtin::BIlabs; 10540 case Builtin::BIlabs: 10541 return Builtin::BIllabs; 10542 case Builtin::BIllabs: 10543 return 0; 10544 10545 case Builtin::BIfabsf: 10546 return Builtin::BIfabs; 10547 case Builtin::BIfabs: 10548 return Builtin::BIfabsl; 10549 case Builtin::BIfabsl: 10550 return 0; 10551 10552 case Builtin::BIcabsf: 10553 return Builtin::BIcabs; 10554 case Builtin::BIcabs: 10555 return Builtin::BIcabsl; 10556 case Builtin::BIcabsl: 10557 return 0; 10558 } 10559 } 10560 10561 // Returns the argument type of the 
absolute value function. 10562 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10563 unsigned AbsType) { 10564 if (AbsType == 0) 10565 return QualType(); 10566 10567 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10568 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10569 if (Error != ASTContext::GE_None) 10570 return QualType(); 10571 10572 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10573 if (!FT) 10574 return QualType(); 10575 10576 if (FT->getNumParams() != 1) 10577 return QualType(); 10578 10579 return FT->getParamType(0); 10580 } 10581 10582 // Returns the best absolute value function, or zero, based on type and 10583 // current absolute value function. 10584 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10585 unsigned AbsFunctionKind) { 10586 unsigned BestKind = 0; 10587 uint64_t ArgSize = Context.getTypeSize(ArgType); 10588 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10589 Kind = getLargerAbsoluteValueFunction(Kind)) { 10590 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10591 if (Context.getTypeSize(ParamType) >= ArgSize) { 10592 if (BestKind == 0) 10593 BestKind = Kind; 10594 else if (Context.hasSameType(ParamType, ArgType)) { 10595 BestKind = Kind; 10596 break; 10597 } 10598 } 10599 } 10600 return BestKind; 10601 } 10602 10603 enum AbsoluteValueKind { 10604 AVK_Integer, 10605 AVK_Floating, 10606 AVK_Complex 10607 }; 10608 10609 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10610 if (T->isIntegralOrEnumerationType()) 10611 return AVK_Integer; 10612 if (T->isRealFloatingType()) 10613 return AVK_Floating; 10614 if (T->isAnyComplexType()) 10615 return AVK_Complex; 10616 10617 llvm_unreachable("Type not integer, floating, or complex"); 10618 } 10619 10620 // Changes the absolute value function to a different type. Preserves whether 10621 // the function is a builtin. 
10622 static unsigned changeAbsFunction(unsigned AbsKind, 10623 AbsoluteValueKind ValueKind) { 10624 switch (ValueKind) { 10625 case AVK_Integer: 10626 switch (AbsKind) { 10627 default: 10628 return 0; 10629 case Builtin::BI__builtin_fabsf: 10630 case Builtin::BI__builtin_fabs: 10631 case Builtin::BI__builtin_fabsl: 10632 case Builtin::BI__builtin_cabsf: 10633 case Builtin::BI__builtin_cabs: 10634 case Builtin::BI__builtin_cabsl: 10635 return Builtin::BI__builtin_abs; 10636 case Builtin::BIfabsf: 10637 case Builtin::BIfabs: 10638 case Builtin::BIfabsl: 10639 case Builtin::BIcabsf: 10640 case Builtin::BIcabs: 10641 case Builtin::BIcabsl: 10642 return Builtin::BIabs; 10643 } 10644 case AVK_Floating: 10645 switch (AbsKind) { 10646 default: 10647 return 0; 10648 case Builtin::BI__builtin_abs: 10649 case Builtin::BI__builtin_labs: 10650 case Builtin::BI__builtin_llabs: 10651 case Builtin::BI__builtin_cabsf: 10652 case Builtin::BI__builtin_cabs: 10653 case Builtin::BI__builtin_cabsl: 10654 return Builtin::BI__builtin_fabsf; 10655 case Builtin::BIabs: 10656 case Builtin::BIlabs: 10657 case Builtin::BIllabs: 10658 case Builtin::BIcabsf: 10659 case Builtin::BIcabs: 10660 case Builtin::BIcabsl: 10661 return Builtin::BIfabsf; 10662 } 10663 case AVK_Complex: 10664 switch (AbsKind) { 10665 default: 10666 return 0; 10667 case Builtin::BI__builtin_abs: 10668 case Builtin::BI__builtin_labs: 10669 case Builtin::BI__builtin_llabs: 10670 case Builtin::BI__builtin_fabsf: 10671 case Builtin::BI__builtin_fabs: 10672 case Builtin::BI__builtin_fabsl: 10673 return Builtin::BI__builtin_cabsf; 10674 case Builtin::BIabs: 10675 case Builtin::BIlabs: 10676 case Builtin::BIllabs: 10677 case Builtin::BIfabsf: 10678 case Builtin::BIfabs: 10679 case Builtin::BIfabsl: 10680 return Builtin::BIcabsf; 10681 } 10682 } 10683 llvm_unreachable("Unable to convert function"); 10684 } 10685 10686 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10687 const IdentifierInfo *FnInfo = 
FDecl->getIdentifier(); 10688 if (!FnInfo) 10689 return 0; 10690 10691 switch (FDecl->getBuiltinID()) { 10692 default: 10693 return 0; 10694 case Builtin::BI__builtin_abs: 10695 case Builtin::BI__builtin_fabs: 10696 case Builtin::BI__builtin_fabsf: 10697 case Builtin::BI__builtin_fabsl: 10698 case Builtin::BI__builtin_labs: 10699 case Builtin::BI__builtin_llabs: 10700 case Builtin::BI__builtin_cabs: 10701 case Builtin::BI__builtin_cabsf: 10702 case Builtin::BI__builtin_cabsl: 10703 case Builtin::BIabs: 10704 case Builtin::BIlabs: 10705 case Builtin::BIllabs: 10706 case Builtin::BIfabs: 10707 case Builtin::BIfabsf: 10708 case Builtin::BIfabsl: 10709 case Builtin::BIcabs: 10710 case Builtin::BIcabsf: 10711 case Builtin::BIcabsl: 10712 return FDecl->getBuiltinID(); 10713 } 10714 llvm_unreachable("Unknown Builtin type"); 10715 } 10716 10717 // If the replacement is valid, emit a note with replacement function. 10718 // Additionally, suggest including the proper header if not already included. 
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  const char *FunctionName = nullptr;
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    // In C++, prefer suggesting the overloaded std::abs.
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    // In C (or for complex types), suggest the specific builtin/library
    // function identified by AbsKind.
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      if (R.isSingleResult()) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          // The name resolves to something other than the expected builtin;
          // suggesting it would be wrong, so stay silent.
          return;
        }
      } else if (!R.empty()) {
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}

// Returns true if FDecl is a function named Str declared in namespace std.
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}

// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative.  Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    const char *FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind.  Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    // The argument is wider than the parameter: warn and, when a wider
    // variant exists, suggest it.
    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used.  Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  // A literal zero reaches std::max as a materialized temporary wrapping an
  // IntegerLiteral whose value is 0.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    // Remove from the zero up to (but not including) the second argument.
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    // Remove everything after the first argument through the zero.
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}

//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  // Only a binary operator in the size position can be a misplaced comparison.
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  // Fix-it 1: close the call right after the LHS of the comparison so the
  // comparison applies to the call's result instead of its size argument.
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  // Fix-it 2: cast the whole comparison to size_t to silence the warning when
  // the comparison really was intended.
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
///
/// \param IsContained set to true when the dynamic class is a field (found by
///        recursion) rather than \p T itself.
/// \returns the dynamic class, or null if none is found.
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  // Only a fully-defined, valid class can answer isDynamicClass().
  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields.  If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      IsContained = true;
      return ContainedRD;
    }
  }

  return nullptr;
}

/// If \p E is a sizeof expression, returns it as a UnaryExprOrTypeTraitExpr;
/// otherwise returns null.
static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
  if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
    if (Unary->getKind() == UETT_SizeOf)
      return Unary;
  return nullptr;
}

/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL.
static const Expr *getSizeOfExprArg(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    if (!SizeOf->isArgumentType())
      return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
  return nullptr;
}

/// If E is a sizeof expression, returns its argument type.
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

/// Type visitor that emits a note_nontrivial_field diagnostic for every field
/// that makes a record non-trivial to default-initialize (used with the
/// warn_cstruct_memaccess diagnostic on memset/bzero of such records).
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    // Arrays are handled by visiting their base element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    // Recurse into every field of a nested record.
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: walk record type \p RT and note its problematic fields.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

/// Type visitor that emits a note_nontrivial_field diagnostic for every field
/// that makes a record non-trivial to copy (used with the
/// warn_cstruct_memaccess diagnostic on memcpy/memmove of such records).
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    // Arrays are handled by visiting their base element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    // Recurse into every field of a nested record.
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  /// Entry point: walk record type \p RT and note its problematic fields.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

} // namespace

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  // A sizeof combined with '*' or '+' (e.g. 'n * sizeof(T)') still counts as
  // a size computation; recurse into both operands.
  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}

/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  // Both locations are inside macro expansions; compare the files of their
  // immediate macro callers instead.
  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}

/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  // Only memset and bzero have a value/size pair that can be transposed.
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // The size is the last argument: index 2 for memset, index 1 for bzero.
  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  // Matches integer or character literals with value zero.
  auto isLiteralZero = [](const Expr *E) {
    return (isa<IntegerLiteral>(E) &&
            cast<IntegerLiteral>(E)->getValue() == 0) ||
           (isa<CharacterLiteral>(E) &&
            cast<CharacterLiteral>(E)->getValue() == 0);
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Skip the case where the zero came in through a macro argument, since the
  // macro may legitimately expand to zero for some of its uses.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // Only warn when the fill value is nonzero; 'memset(p, 0, 0)' has both
      // arguments zero and is not diagnosed here.
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}

/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
/// \param BId The builtin ID of the callee (e.g. Builtin::BImemset); must be
///        nonzero.
/// \param FnName The callee's name, used in the diagnostics.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset.  Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // LastArg bounds the pointer arguments inspected by the loop below;
  // LenArg is the index of the size argument.
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                  << ReadableName
                                  << PointeeTy
                                  << DestTy
                                  << DSR
                                  << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                  << ActionIdx
                                  << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                  << FnName << SizeOfArgTy << ArgIdx
                                  << PointeeTy << Dest->getSourceRange()
                                  << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                              << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                              << IsContained << ContainedRD << OperationType
                              << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // Non-trivial C structs: memset/bzero over a record that is non-trivial
      // to default-initialize, or memcpy/memmove over one that is non-trivial
      // to copy.  The Search* visitors note which fields are responsible.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // Emitted only when one of the branches above produced a warning: suggest
    // casting the argument to void* to silence it.
    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
11421 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11422 Ex = Ex->IgnoreParenCasts(); 11423 11424 while (true) { 11425 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11426 if (!BO || !BO->isAdditiveOp()) 11427 break; 11428 11429 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11430 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11431 11432 if (isa<IntegerLiteral>(RHS)) 11433 Ex = LHS; 11434 else if (isa<IntegerLiteral>(LHS)) 11435 Ex = RHS; 11436 else 11437 break; 11438 } 11439 11440 return Ex; 11441 } 11442 11443 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11444 ASTContext &Context) { 11445 // Only handle constant-sized or VLAs, but not flexible members. 11446 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11447 // Only issue the FIXIT for arrays of size > 1. 11448 if (CAT->getSize().getSExtValue() <= 1) 11449 return false; 11450 } else if (!Ty->isVariableArrayType()) { 11451 return false; 11452 } 11453 return true; 11454 } 11455 11456 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11457 // be the size of the source, instead of the destination. 
void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
                                    IdentifierInfo *FnName) {

  // Don't crash if the user has the wrong number of arguments
  unsigned NumArgs = Call->getNumArgs();
  if ((NumArgs != 3) && (NumArgs != 4))
    return;

  // Strip parens/casts and literal +/- adjustments so that e.g.
  // 'sizeof(x) - 1' is still recognized below.
  const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
  const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
  const Expr *CompareWithSrc = nullptr;

  if (CheckMemorySizeofForComparison(*this, SizeArg, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Look for 'strlcpy(dst, x, sizeof(x))'
  if (const Expr *Ex = getSizeOfExprArg(SizeArg))
    CompareWithSrc = Ex;
  else {
    // Look for 'strlcpy(dst, x, strlen(x))'
    if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
      if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
          SizeCall->getNumArgs() == 1)
        CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
    }
  }

  if (!CompareWithSrc)
    return;

  // Determine if the argument to sizeof/strlen is equal to the source
  // argument.  In principle there's all kinds of things you could do
  // here, for instance creating an == expression and evaluating it with
  // EvaluateAsBooleanCondition, but this uses a more direct technique:
  // both must be DeclRefExprs naming the same declaration.
  const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
  if (!SrcArgDRE)
    return;

  const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
  if (!CompareWithSrcDRE ||
      SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
    return;

  const Expr *OriginalSizeArg = Call->getArg(2);
  Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
      << OriginalSizeArg->getSourceRange() << FnName;

  // Output a FIXIT hint if the destination is an array (rather than a
  // pointer to an array).  This could be enhanced to handle some
  // pointers if we know the actual size, like if DstArg is 'array+2'
  // we could say 'sizeof(array)-2'.
  const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
  if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
    return;

  // Build the suggested replacement text: sizeof(<destination expression>).
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ")";

  Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
      << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
                                      OS.str());
}

/// Check if two expressions refer to the same declaration.
11526 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11527 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11528 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11529 return D1->getDecl() == D2->getDecl(); 11530 return false; 11531 } 11532 11533 static const Expr *getStrlenExprArg(const Expr *E) { 11534 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11535 const FunctionDecl *FD = CE->getDirectCallee(); 11536 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11537 return nullptr; 11538 return CE->getArg(0)->IgnoreParenCasts(); 11539 } 11540 return nullptr; 11541 } 11542 11543 // Warn on anti-patterns as the 'size' argument to strncat. 11544 // The correct size argument should look like following: 11545 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 11546 void Sema::CheckStrncatArguments(const CallExpr *CE, 11547 IdentifierInfo *FnName) { 11548 // Don't crash if the user has the wrong number of arguments. 11549 if (CE->getNumArgs() < 3) 11550 return; 11551 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 11552 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 11553 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 11554 11555 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 11556 CE->getRParenLoc())) 11557 return; 11558 11559 // Identify common expressions, which are wrongly used as the size argument 11560 // to strncat and may lead to buffer overflows. 
11561 unsigned PatternType = 0; 11562 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 11563 // - sizeof(dst) 11564 if (referToTheSameDecl(SizeOfArg, DstArg)) 11565 PatternType = 1; 11566 // - sizeof(src) 11567 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 11568 PatternType = 2; 11569 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 11570 if (BE->getOpcode() == BO_Sub) { 11571 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 11572 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 11573 // - sizeof(dst) - strlen(dst) 11574 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 11575 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 11576 PatternType = 1; 11577 // - sizeof(src) - (anything) 11578 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 11579 PatternType = 2; 11580 } 11581 } 11582 11583 if (PatternType == 0) 11584 return; 11585 11586 // Generate the diagnostic. 11587 SourceLocation SL = LenArg->getBeginLoc(); 11588 SourceRange SR = LenArg->getSourceRange(); 11589 SourceManager &SM = getSourceManager(); 11590 11591 // If the function is defined as a builtin macro, do not show macro expansion. 11592 if (SM.isMacroArgExpansion(SL)) { 11593 SL = SM.getSpellingLoc(SL); 11594 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 11595 SM.getSpellingLoc(SR.getEnd())); 11596 } 11597 11598 // Check if the destination is an array (rather than a pointer to an array). 
11599 QualType DstTy = DstArg->getType(); 11600 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11601 Context); 11602 if (!isKnownSizeArray) { 11603 if (PatternType == 1) 11604 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11605 else 11606 Diag(SL, diag::warn_strncat_src_size) << SR; 11607 return; 11608 } 11609 11610 if (PatternType == 1) 11611 Diag(SL, diag::warn_strncat_large_size) << SR; 11612 else 11613 Diag(SL, diag::warn_strncat_src_size) << SR; 11614 11615 SmallString<128> sizeString; 11616 llvm::raw_svector_ostream OS(sizeString); 11617 OS << "sizeof("; 11618 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11619 OS << ") - "; 11620 OS << "strlen("; 11621 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11622 OS << ") - 1"; 11623 11624 Diag(SL, diag::note_strncat_wrong_size) 11625 << FixItHint::CreateReplacement(SR, OS.str()); 11626 } 11627 11628 namespace { 11629 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11630 const UnaryOperator *UnaryExpr, const Decl *D) { 11631 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11632 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11633 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11634 return; 11635 } 11636 } 11637 11638 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11639 const UnaryOperator *UnaryExpr) { 11640 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11641 const Decl *D = Lvalue->getDecl(); 11642 if (isa<DeclaratorDecl>(D)) 11643 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11644 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11645 } 11646 11647 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11648 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11649 Lvalue->getMemberDecl()); 11650 } 11651 11652 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11653 const UnaryOperator 
*UnaryExpr) { 11654 const auto *Lambda = dyn_cast<LambdaExpr>( 11655 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11656 if (!Lambda) 11657 return; 11658 11659 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11660 << CalleeName << 2 /*object: lambda expression*/; 11661 } 11662 11663 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11664 const DeclRefExpr *Lvalue) { 11665 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11666 if (Var == nullptr) 11667 return; 11668 11669 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11670 << CalleeName << 0 /*object: */ << Var; 11671 } 11672 11673 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11674 const CastExpr *Cast) { 11675 SmallString<128> SizeString; 11676 llvm::raw_svector_ostream OS(SizeString); 11677 11678 clang::CastKind Kind = Cast->getCastKind(); 11679 if (Kind == clang::CK_BitCast && 11680 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11681 return; 11682 if (Kind == clang::CK_IntegralToPointer && 11683 !isa<IntegerLiteral>( 11684 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11685 return; 11686 11687 switch (Cast->getCastKind()) { 11688 case clang::CK_BitCast: 11689 case clang::CK_IntegralToPointer: 11690 case clang::CK_FunctionToPointerDecay: 11691 OS << '\''; 11692 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11693 OS << '\''; 11694 break; 11695 default: 11696 return; 11697 } 11698 11699 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 11700 << CalleeName << 0 /*object: */ << OS.str(); 11701 } 11702 } // namespace 11703 11704 /// Alerts the user that they are attempting to free a non-malloc'd object. 11705 void Sema::CheckFreeArguments(const CallExpr *E) { 11706 const std::string CalleeName = 11707 cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11708 11709 { // Prefer something that doesn't involve a cast to make things simpler. 
11710 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11711 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11712 switch (UnaryExpr->getOpcode()) { 11713 case UnaryOperator::Opcode::UO_AddrOf: 11714 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11715 case UnaryOperator::Opcode::UO_Plus: 11716 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11717 default: 11718 break; 11719 } 11720 11721 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11722 if (Lvalue->getType()->isArrayType()) 11723 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11724 11725 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11726 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11727 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11728 return; 11729 } 11730 11731 if (isa<BlockExpr>(Arg)) { 11732 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11733 << CalleeName << 1 /*object: block*/; 11734 return; 11735 } 11736 } 11737 // Maybe the cast was important, check after the other cases. 11738 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11739 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11740 } 11741 11742 void 11743 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11744 SourceLocation ReturnLoc, 11745 bool isObjCMethod, 11746 const AttrVec *Attrs, 11747 const FunctionDecl *FD) { 11748 // Check if the return value is null but should not be. 11749 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11750 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11751 CheckNonNullExpr(*this, RetValExp)) 11752 Diag(ReturnLoc, diag::warn_null_ret) 11753 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11754 11755 // C++11 [basic.stc.dynamic.allocation]p4: 11756 // If an allocation function declared with a non-throwing 11757 // exception-specification fails to allocate storage, it shall return 11758 // a null pointer. 
Any other allocation function that fails to allocate
// storage shall indicate failure only by throwing an exception [...]
if (FD) {
  OverloadedOperatorKind Op = FD->getOverloadedOperator();
  if (Op == OO_New || Op == OO_Array_New) {
    const FunctionProtoType *Proto
      = FD->getType()->castAs<FunctionProtoType>();
    // A potentially-throwing operator new must report failure by throwing,
    // not by returning null; warn when it visibly returns a null pointer.
    if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
        CheckNonNullExpr(*this, RetValExp))
      Diag(ReturnLoc, diag::warn_operator_new_returns_null)
          << FD << getLangOpts().CPlusPlus11;
  }
}

// PPC MMA non-pointer types are not allowed as return type. Checking the type
// here prevents the user from using a PPC MMA type as trailing return type.
if (Context.getTargetInfo().getTriple().isPPC64())
  CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}

/// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
                                BinaryOperatorKind Opcode) {
  // Match and capture subexpressions such as "(float) X == 0.1".
  // The lambda communicates its matches through FPLiteral/FPCast, so the
  // second call below (with swapped operands) overwrites the first's results.
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      // Round the literal into the (narrower) source type of the cast
      // operand; Lossy is set if the value changes in the conversion.
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparison against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
      << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    // Thin wrapper: canonicalize the type, then defer to the canonical-type
    // overload below.
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Look through vector/complex/atomic wrappers to their element/value type.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        // NonNegative iff the fixed underlying type is unsigned.
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        // Signed case: reserve one extra bit over the positive magnitude for
        // the sign, and take whichever side needs more bits.
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    // Unlike forValueOfCanonicalType, always widen an enum to its full
    // underlying integer type, in C++ as well as C.
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e.
their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    // The result is signed if either input is; a signed result needs one
    // extra bit beyond the larger value-bit count for the sign.
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
                    L.NonNegative && R.NonNegative);
  }

  /// Return the range of a bitwise-AND of the two ranges.
  static IntRange bit_and(IntRange L, IntRange R) {
    // A non-negative operand zeroes all bits above its own width, so the
    // result width is bounded by each non-negative operand's width.
    unsigned Bits = std::max(L.Width, R.Width);
    bool NonNegative = false;
    if (L.NonNegative) {
      Bits = std::min(Bits, L.Width);
      NonNegative = true;
    }
    if (R.NonNegative) {
      Bits = std::min(Bits, R.Width);
      NonNegative = true;
    }
    return IntRange(Bits, NonNegative);
  }

  /// Return the range of a sum of the two ranges.
  static IntRange sum(IntRange L, IntRange R) {
    // One extra value bit for the carry, plus a sign bit if either side
    // can be negative.
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a difference of the two ranges.
  static IntRange difference(IntRange L, IntRange R) {
    // We need a 1-bit-wider range if:
    //   1) LHS can be negative: least value can be reduced.
    //   2) RHS can be negative: greatest value can be increased.
    bool CanWiden = !L.NonNegative || !R.NonNegative;
    // Only a known-zero RHS (zero-width range) subtracted from a
    // non-negative LHS leaves the result provably non-negative.
    bool Unsigned = L.NonNegative && R.Width == 0;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
                    !Unsigned,
                    Unsigned);
  }

  /// Return the range of a product of the two ranges.
  static IntRange product(IntRange L, IntRange R) {
    // If both LHS and RHS can be negative, we can form
    //   -2^L * -2^R = 2^(L + R)
    // which requires L + R + 1 value bits to represent.
    bool CanWiden = !L.NonNegative && !R.NonNegative;
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a remainder operation between the two ranges.
  static IntRange rem(IntRange L, IntRange R) {
    // The result of a remainder can't be larger than the result of
    // either side. The sign of the result is the sign of the LHS.
    bool Unsigned = L.NonNegative;
    return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
                    Unsigned);
  }
};

} // namespace

/// Compute the range actually occupied by a known integer value, truncating
/// non-negative values to at most \p MaxWidth bits. Note this may modify
/// \p value in place (truncation).
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

/// As above, but for a full APValue: handles scalar ints, vectors of ints
/// (element ranges are joined), and complex ints (real/imag joined).
static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case. It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

/// Return the expression's type, looking through an _Atomic wrapper to the
/// underlying value type.
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
///        particular, assume that arithmetic on narrower types doesn't leave
///        those types. If \c false, return a range including all possible
///        result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
                             bool InConstantContext, bool Approximate) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C, InConstantContext))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    // No-op and lvalue-to-rvalue casts don't change the value range at all.
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
                                     std::min(MaxWidth, OutputTypeRange.Width),
                                     InConstantContext, Approximate);

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C,
                          CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
                          MaxWidth, InConstantContext, Approximate);

    // Otherwise, conservatively merge.
    // GetExprRange requires an integer expression, but a throw expression
    // results in a void type.
    Expr *E = CO->getTrueExpr();
    IntRange L = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
    E = CO->getFalseExpr();
    IntRange R = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    // How the operand ranges are merged after the switch; opcodes that need
    // something other than a conservative join override this.
    IntRange (*Combine)(IntRange, IntRange) = IntRange::join;

    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      Combine = IntRange::bit_and;
      break;

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive. It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      LLVM_FALLTHROUGH;

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
                                Approximate);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      if (Optional<llvm::APSInt> shift =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        if (shift->isNonNegative()) {
          unsigned zext = shift->getZExtValue();
          if (zext >= L.Width)
            L.Width = (L.NonNegative ? 0 : 1);
          else
            L.Width -= zext;
        }
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    case BO_Add:
      if (!Approximate)
        Combine = IntRange::sum;
      break;

    case BO_Sub:
      // Pointer subtraction yields ptrdiff_t; its range is just the type's.
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      if (!Approximate)
        Combine = IntRange::difference;
      break;

    case BO_Mul:
      if (!Approximate)
        Combine = IntRange::product;
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
                                Approximate);

      // If the divisor is constant, use that.
      if (Optional<llvm::APSInt> divisor =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      // FIXME: This is wrong if the LHS could be its minimal value and the RHS
      // could be -1.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
                                Approximate);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    case BO_Rem:
      Combine = IntRange::rem;
      break;

    // The default behavior is okay for these.
    case BO_Xor:
    case BO_Or:
      break;
    }

    // Combine the two ranges, but limit the result to the type in which we
    // performed the computation.
    QualType T = GetExprType(E);
    unsigned opWidth = C.getIntWidth(T);
    IntRange L =
        GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
    IntRange R =
        GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
    // Note: this 'C' deliberately shadows the ASTContext parameter; the
    // context is not needed past this point.
    IntRange C = Combine(L, R);
    C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
    C.Width = std::min(C.Width, MaxWidth);
    return C;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
                        Approximate);

  // A bit-field access is bounded by its declared width regardless of the
  // field's declared type.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  return IntRange::forValueOfType(C, GetExprType(E));
}

/// Convenience overload: use the expression's own type width as MaxWidth.
static IntRange GetExprRange(ASTContext &C, const Expr *E,
                             bool InConstantContext, bool Approximate) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
                      Approximate);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
static bool IsSameFloatAfterCast(const llvm::APFloat &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  llvm::APFloat truncated = value;

  // Round-trip through Src then Tgt; if the bits survive unchanged, the
  // coercion is value-preserving.
  bool ignored;
  truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
  truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);

  return truncated.bitwiseIsEqual(value);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  // Vectors are value-preserving only if every element is.
  if (value.isVector()) {
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);

/// Returns true if the comparison constant is an enum constant or comes from
/// a macro expansion (other than the boolean-literal macros), in which case
/// tautological-comparison diagnostics should be suppressed.
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
          dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}

/// True if E is an integer expression whose declared type or (after stripping
/// implicit conversions) underlying type is unsigned.
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type. In general this has the
/// following structure:
///
///     |-----------| . . . |-----------|
///     ^           ^       ^           ^
///    Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    // A zero-width range represents the single value 0.
    if (R.Width == 0)
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    // Composite results: the set of orderings that can possibly hold
    // between the constant and any value in the range.
    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // With a hole, PromotedMin holds HoleMax and PromotedMax holds HoleMin
      // (see the field comments), so these tests place Value relative to
      // the hole rather than to Min/Max.
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  /// If the comparison 'range Op constant' (or 'constant Op range' when
  /// ConstantOnRHS is false) has a single possible result, return its
  /// spelling for use in a diagnostic; otherwise return None.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return llvm::None;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      // Relational operators: normalize for operand order, then flip for
      // the non-strict forms.
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return llvm::None;
  }
};
}

/// Returns true if E has an enumeration type, looking through implicit
/// integral promotions.
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}

static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

/// Diagnose comparisons of a constant against an expression whose value range
/// forces a single outcome (always true / always false). Returns true if a
/// diagnostic was emitted.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  // Keep the unstripped operand for the unsigned-vs-zero classification below.
  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //   - If the constant is outside the range of representable values of
  //     the enumeration. In such a case, we should warn about the cast
  //     to enumeration type, not about the comparison.
  //   - If the constant is the maximum / minimum in-range value. For an
  //     enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // A comparison of an unsigned bit-field against 0 is really a type problem,
  // even though at the type level the bit-field might promote to 'signed int'.
  if (Other->refersToBitField() && InRange && Value == 0 &&
      Other->getType()->isUnsignedIntegerOrEnumerationType())
    TautologicalTypeCompare = true;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
               Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  if (!TautologicalTypeCompare) {
    S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
        << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
        << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
    return true;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {

    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
    unsigned Diag =
        (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
            ? (HasEnumType(OriginalOther)
                   ? diag::warn_unsigned_enum_always_true_comparison
                   : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
                              : diag::warn_unsigned_always_true_comparison)
            : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}

/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}

/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
12718 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12719 return AnalyzeImpConvsInComparison(S, E); 12720 12721 // Don't analyze value-dependent comparisons directly. 12722 if (E->isValueDependent()) 12723 return AnalyzeImpConvsInComparison(S, E); 12724 12725 Expr *LHS = E->getLHS(); 12726 Expr *RHS = E->getRHS(); 12727 12728 if (T->isIntegralType(S.Context)) { 12729 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12730 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12731 12732 // We don't care about expressions whose result is a constant. 12733 if (RHSValue && LHSValue) 12734 return AnalyzeImpConvsInComparison(S, E); 12735 12736 // We only care about expressions where just one side is literal 12737 if ((bool)RHSValue ^ (bool)LHSValue) { 12738 // Is the constant on the RHS or LHS? 12739 const bool RhsConstant = (bool)RHSValue; 12740 Expr *Const = RhsConstant ? RHS : LHS; 12741 Expr *Other = RhsConstant ? LHS : RHS; 12742 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12743 12744 // Check whether an integer constant comparison results in a value 12745 // of 'true' or 'false'. 12746 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12747 return AnalyzeImpConvsInComparison(S, E); 12748 } 12749 } 12750 12751 if (!T->hasUnsignedIntegerRepresentation()) { 12752 // We don't do anything special if this isn't an unsigned integral 12753 // comparison: we're only interested in integral comparisons, and 12754 // signed comparisons only happen in cases we don't care to warn about. 12755 return AnalyzeImpConvsInComparison(S, E); 12756 } 12757 12758 LHS = LHS->IgnoreParenImpCasts(); 12759 RHS = RHS->IgnoreParenImpCasts(); 12760 12761 if (!S.getLangOpts().CPlusPlus) { 12762 // Avoid warning about comparison of integers with different signs when 12763 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12764 // the type of `E`. 
12765 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12766 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12767 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12768 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12769 } 12770 12771 // Check to see if one of the (unmodified) operands is of different 12772 // signedness. 12773 Expr *signedOperand, *unsignedOperand; 12774 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12775 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12776 "unsigned comparison between two signed integer expressions?"); 12777 signedOperand = LHS; 12778 unsignedOperand = RHS; 12779 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12780 signedOperand = RHS; 12781 unsignedOperand = LHS; 12782 } else { 12783 return AnalyzeImpConvsInComparison(S, E); 12784 } 12785 12786 // Otherwise, calculate the effective range of the signed operand. 12787 IntRange signedRange = GetExprRange( 12788 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12789 12790 // Go ahead and analyze implicit conversions in the operands. Note 12791 // that we skip the implicit conversions on both sides. 12792 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12793 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12794 12795 // If the signed range is non-negative, -Wsign-compare won't fire. 12796 if (signedRange.NonNegative) 12797 return; 12798 12799 // For (in)equality comparisons, if the unsigned operand is a 12800 // constant which cannot collide with a overflowed signed operand, 12801 // then reinterpreting the signed operand as unsigned will not 12802 // change the result of the comparison. 
12803 if (E->isEqualityOp()) { 12804 unsigned comparisonWidth = S.Context.getIntWidth(T); 12805 IntRange unsignedRange = 12806 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12807 /*Approximate*/ true); 12808 12809 // We should never be unable to prove that the unsigned operand is 12810 // non-negative. 12811 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12812 12813 if (unsignedRange.Width < comparisonWidth) 12814 return; 12815 } 12816 12817 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12818 S.PDiag(diag::warn_mixed_sign_comparison) 12819 << LHS->getType() << RHS->getType() 12820 << LHS->getSourceRange() << RHS->getSourceRange()); 12821 } 12822 12823 /// Analyzes an attempt to assign the given value to a bitfield. 12824 /// 12825 /// Returns true if there was something fishy about the attempt. 12826 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12827 SourceLocation InitLoc) { 12828 assert(Bitfield->isBitField()); 12829 if (Bitfield->isInvalidDecl()) 12830 return false; 12831 12832 // White-list bool bitfields. 12833 QualType BitfieldType = Bitfield->getType(); 12834 if (BitfieldType->isBooleanType()) 12835 return false; 12836 12837 if (BitfieldType->isEnumeralType()) { 12838 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12839 // If the underlying enum type was not explicitly specified as an unsigned 12840 // type and the enum contain only positive values, MSVC++ will cause an 12841 // inconsistency by storing this as a signed type. 
12842 if (S.getLangOpts().CPlusPlus11 && 12843 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12844 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12845 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12846 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12847 << BitfieldEnumDecl; 12848 } 12849 } 12850 12851 if (Bitfield->getType()->isBooleanType()) 12852 return false; 12853 12854 // Ignore value- or type-dependent expressions. 12855 if (Bitfield->getBitWidth()->isValueDependent() || 12856 Bitfield->getBitWidth()->isTypeDependent() || 12857 Init->isValueDependent() || 12858 Init->isTypeDependent()) 12859 return false; 12860 12861 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12862 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12863 12864 Expr::EvalResult Result; 12865 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12866 Expr::SE_AllowSideEffects)) { 12867 // The RHS is not constant. If the RHS has an enum type, make sure the 12868 // bitfield is wide enough to hold all the values of the enum without 12869 // truncation. 12870 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12871 EnumDecl *ED = EnumTy->getDecl(); 12872 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12873 12874 // Enum types are implicitly signed on Windows, so check if there are any 12875 // negative enumerators to see if the enum was intended to be signed or 12876 // not. 12877 bool SignedEnum = ED->getNumNegativeBits() > 0; 12878 12879 // Check for surprising sign changes when assigning enum values to a 12880 // bitfield of different signedness. If the bitfield is signed and we 12881 // have exactly the right number of bits to store this unsigned enum, 12882 // suggest changing the enum to an unsigned type. This typically happens 12883 // on Windows where unfixed enums always use an underlying type of 'int'. 
12884 unsigned DiagID = 0; 12885 if (SignedEnum && !SignedBitfield) { 12886 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12887 } else if (SignedBitfield && !SignedEnum && 12888 ED->getNumPositiveBits() == FieldWidth) { 12889 DiagID = diag::warn_signed_bitfield_enum_conversion; 12890 } 12891 12892 if (DiagID) { 12893 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12894 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12895 SourceRange TypeRange = 12896 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12897 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12898 << SignedEnum << TypeRange; 12899 } 12900 12901 // Compute the required bitwidth. If the enum has negative values, we need 12902 // one more bit than the normal number of positive bits to represent the 12903 // sign bit. 12904 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12905 ED->getNumNegativeBits()) 12906 : ED->getNumPositiveBits(); 12907 12908 // Check the bitwidth. 12909 if (BitsNeeded > FieldWidth) { 12910 Expr *WidthExpr = Bitfield->getBitWidth(); 12911 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12912 << Bitfield << ED; 12913 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12914 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12915 } 12916 } 12917 12918 return false; 12919 } 12920 12921 llvm::APSInt Value = Result.Val.getInt(); 12922 12923 unsigned OriginalWidth = Value.getBitWidth(); 12924 12925 if (!Value.isSigned() || Value.isNegative()) 12926 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12927 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12928 OriginalWidth = Value.getMinSignedBits(); 12929 12930 if (OriginalWidth <= FieldWidth) 12931 return false; 12932 12933 // Compute the value which the bitfield will contain. 
12934 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12935 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12936 12937 // Check whether the stored value is equal to the original value. 12938 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12939 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12940 return false; 12941 12942 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12943 // therefore don't strictly fit into a signed bitfield of width 1. 12944 if (FieldWidth == 1 && Value == 1) 12945 return false; 12946 12947 std::string PrettyValue = toString(Value, 10); 12948 std::string PrettyTrunc = toString(TruncatedValue, 10); 12949 12950 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12951 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12952 << Init->getSourceRange(); 12953 12954 return true; 12955 } 12956 12957 /// Analyze the given simple or compound assignment for warning-worthy 12958 /// operations. 12959 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12960 // Just recurse on the LHS. 12961 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12962 12963 // We want to recurse on the RHS as normal unless we're assigning to 12964 // a bitfield. 12965 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12966 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12967 E->getOperatorLoc())) { 12968 // Recurse, ignoring any implicit conversions on the RHS. 12969 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12970 E->getOperatorLoc()); 12971 } 12972 } 12973 12974 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12975 12976 // Diagnose implicitly sequentially-consistent atomic assignment. 12977 if (E->getLHS()->getType()->isAtomicType()) 12978 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12979 } 12980 12981 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
12982 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12983 SourceLocation CContext, unsigned diag, 12984 bool pruneControlFlow = false) { 12985 if (pruneControlFlow) { 12986 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12987 S.PDiag(diag) 12988 << SourceType << T << E->getSourceRange() 12989 << SourceRange(CContext)); 12990 return; 12991 } 12992 S.Diag(E->getExprLoc(), diag) 12993 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12994 } 12995 12996 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12997 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12998 SourceLocation CContext, 12999 unsigned diag, bool pruneControlFlow = false) { 13000 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13001 } 13002 13003 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13004 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13005 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13006 } 13007 13008 static void adornObjCBoolConversionDiagWithTernaryFixit( 13009 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13010 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13011 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13012 Ignored = OVE->getSourceExpr(); 13013 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13014 isa<BinaryOperator>(Ignored) || 13015 isa<CXXOperatorCallExpr>(Ignored); 13016 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13017 if (NeedsParens) 13018 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13019 << FixItHint::CreateInsertion(EndLoc, ")"); 13020 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13021 } 13022 13023 /// Diagnose an implicit cast from a floating point value to an integer value. 
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // During template instantiation, route diagnostics through
  // DiagRuntimeBehavior (via the pruneControlFlow parameter) so they are
  // suppressed on unreachable paths.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
      E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Non-constant source: we can only warn generically about the
    // float->integer (or float->ObjC BOOL) conversion.
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  // Constant values other than 0/1 stored into an ObjC BOOL (signed char)
  // are almost certainly a bug; suggest the "? YES : NO" form.
  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  if (Result == llvm::APFloat::opOK && isExact) {
    // Exactly representable literals (e.g. "int i = 1.0") are not worth a
    // warning; non-literal exact conversions get the generic diagnostic.
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) {  // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // Saturated results (clamped to the integer type's min/max) get the
    // value-specific diagnostic; anything else only warns generically.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else {  // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Render \p Value as the decimal string it becomes after being forced into
/// \p Range (truncated to Range.Width bits with Range's signedness).
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}

/// Returns true if \p Ex is an implicit conversion between 'bool' and a
/// floating-point type.  \p ToBool selects the direction being tested:
/// true checks float -> bool, false checks bool -> float.
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn when adjacent call arguments look like a swapped bool/float pair,
/// e.g. f(1.0, b) where f expects (bool, double).
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Only warn when a neighbouring argument converts in the opposite
    // direction (bool -> float), which suggests the two were swapped.
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Warn about implicit conversions of NULL / nullptr to an integer type.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr::NullPointerConstantKind NullKind =
      E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
  if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro.  Go up a macro if that is the case.
  if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
13314 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13315 ObjCArrayLiteral *ArrayLiteral) { 13316 if (!S.NSArrayDecl) 13317 return; 13318 13319 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13320 if (!TargetObjCPtr) 13321 return; 13322 13323 if (TargetObjCPtr->isUnspecialized() || 13324 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13325 != S.NSArrayDecl->getCanonicalDecl()) 13326 return; 13327 13328 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13329 if (TypeArgs.size() != 1) 13330 return; 13331 13332 QualType TargetElementType = TypeArgs[0]; 13333 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13334 checkObjCCollectionLiteralElement(S, TargetElementType, 13335 ArrayLiteral->getElement(I), 13336 0); 13337 } 13338 } 13339 13340 /// Check an Objective-C dictionary literal being converted to the given 13341 /// target type. 13342 static void 13343 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13344 ObjCDictionaryLiteral *DictionaryLiteral) { 13345 if (!S.NSDictionaryDecl) 13346 return; 13347 13348 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13349 if (!TargetObjCPtr) 13350 return; 13351 13352 if (TargetObjCPtr->isUnspecialized() || 13353 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13354 != S.NSDictionaryDecl->getCanonicalDecl()) 13355 return; 13356 13357 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13358 if (TypeArgs.size() != 2) 13359 return; 13360 13361 QualType TargetKeyType = TypeArgs[0]; 13362 QualType TargetObjectType = TypeArgs[1]; 13363 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 13364 auto Element = DictionaryLiteral->getKeyValueElement(I); 13365 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 13366 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 13367 } 13368 } 13369 13370 // Helper function to filter out cases for constant width constant conversion. 
13371 // Don't warn on char array initialization or for non-decimal values. 13372 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 13373 SourceLocation CC) { 13374 // If initializing from a constant, and the constant starts with '0', 13375 // then it is a binary, octal, or hexadecimal. Allow these constants 13376 // to fill all the bits, even if there is a sign change. 13377 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 13378 const char FirstLiteralCharacter = 13379 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 13380 if (FirstLiteralCharacter == '0') 13381 return false; 13382 } 13383 13384 // If the CC location points to a '{', and the type is char, then assume 13385 // assume it is an array initialization. 13386 if (CC.isValid() && T->isCharType()) { 13387 const char FirstContextCharacter = 13388 S.getSourceManager().getCharacterData(CC)[0]; 13389 if (FirstContextCharacter == '{') 13390 return false; 13391 } 13392 13393 return true; 13394 } 13395 13396 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 13397 const auto *IL = dyn_cast<IntegerLiteral>(E); 13398 if (!IL) { 13399 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 13400 if (UO->getOpcode() == UO_Minus) 13401 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 13402 } 13403 } 13404 13405 return IL; 13406 } 13407 13408 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 13409 E = E->IgnoreParenImpCasts(); 13410 SourceLocation ExprLoc = E->getExprLoc(); 13411 13412 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 13413 BinaryOperator::Opcode Opc = BO->getOpcode(); 13414 Expr::EvalResult Result; 13415 // Do not diagnose unsigned shifts. 
13416 if (Opc == BO_Shl) { 13417 const auto *LHS = getIntegerLiteral(BO->getLHS()); 13418 const auto *RHS = getIntegerLiteral(BO->getRHS()); 13419 if (LHS && LHS->getValue() == 0) 13420 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 13421 else if (!E->isValueDependent() && LHS && RHS && 13422 RHS->getValue().isNonNegative() && 13423 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 13424 S.Diag(ExprLoc, diag::warn_left_shift_always) 13425 << (Result.Val.getInt() != 0); 13426 else if (E->getType()->isSignedIntegerType()) 13427 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 13428 } 13429 } 13430 13431 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 13432 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 13433 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 13434 if (!LHS || !RHS) 13435 return; 13436 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 13437 (RHS->getValue() == 0 || RHS->getValue() == 1)) 13438 // Do not diagnose common idioms. 13439 return; 13440 if (LHS->getValue() != 0 && RHS->getValue() != 0) 13441 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 13442 } 13443 } 13444 13445 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 13446 SourceLocation CC, 13447 bool *ICContext = nullptr, 13448 bool IsListInit = false) { 13449 if (E->isTypeDependent() || E->isValueDependent()) return; 13450 13451 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 13452 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 13453 if (Source == Target) return; 13454 if (Target->isDependentType()) return; 13455 13456 // If the conversion context location is invalid don't complain. We also 13457 // don't want to emit a warning if the issue occurs from the expansion of 13458 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 13459 // delay this check as long as possible. 
Once we detect we are in that 13460 // scenario, we just return. 13461 if (CC.isInvalid()) 13462 return; 13463 13464 if (Source->isAtomicType()) 13465 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13466 13467 // Diagnose implicit casts to bool. 13468 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13469 if (isa<StringLiteral>(E)) 13470 // Warn on string literal to bool. Checks for string literals in logical 13471 // and expressions, for instance, assert(0 && "error here"), are 13472 // prevented by a check in AnalyzeImplicitConversions(). 13473 return DiagnoseImpCast(S, E, T, CC, 13474 diag::warn_impcast_string_literal_to_bool); 13475 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13476 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13477 // This covers the literal expressions that evaluate to Objective-C 13478 // objects. 13479 return DiagnoseImpCast(S, E, T, CC, 13480 diag::warn_impcast_objective_c_literal_to_bool); 13481 } 13482 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13483 // Warn on pointer to bool conversion that is always true. 13484 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13485 SourceRange(CC)); 13486 } 13487 } 13488 13489 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 13490 // is a typedef for signed char (macOS), then that constant value has to be 1 13491 // or 0. 
13492 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13493 Expr::EvalResult Result; 13494 if (E->EvaluateAsInt(Result, S.getASTContext(), 13495 Expr::SE_AllowSideEffects)) { 13496 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13497 adornObjCBoolConversionDiagWithTernaryFixit( 13498 S, E, 13499 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13500 << toString(Result.Val.getInt(), 10)); 13501 } 13502 return; 13503 } 13504 } 13505 13506 // Check implicit casts from Objective-C collection literals to specialized 13507 // collection types, e.g., NSArray<NSString *> *. 13508 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13509 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13510 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13511 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13512 13513 // Strip vector types. 13514 if (isa<VectorType>(Source)) { 13515 if (Target->isVLSTBuiltinType() && 13516 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13517 QualType(Source, 0)) || 13518 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13519 QualType(Source, 0)))) 13520 return; 13521 13522 if (!isa<VectorType>(Target)) { 13523 if (S.SourceMgr.isInSystemMacro(CC)) 13524 return; 13525 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13526 } 13527 13528 // If the vector cast is cast between two vectors of the same size, it is 13529 // a bitcast, not a conversion. 13530 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13531 return; 13532 13533 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13534 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13535 } 13536 if (auto VecTy = dyn_cast<VectorType>(Target)) 13537 Target = VecTy->getElementType().getTypePtr(); 13538 13539 // Strip complex types. 
13540 if (isa<ComplexType>(Source)) { 13541 if (!isa<ComplexType>(Target)) { 13542 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13543 return; 13544 13545 return DiagnoseImpCast(S, E, T, CC, 13546 S.getLangOpts().CPlusPlus 13547 ? diag::err_impcast_complex_scalar 13548 : diag::warn_impcast_complex_scalar); 13549 } 13550 13551 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13552 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13553 } 13554 13555 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13556 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13557 13558 // Strip SVE vector types 13559 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 13560 // Need the original target type for vector type checks 13561 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 13562 // Handle conversion from scalable to fixed when msve-vector-bits is 13563 // specified 13564 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 13565 QualType(Source, 0)) || 13566 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 13567 QualType(Source, 0))) 13568 return; 13569 13570 // If the vector cast is cast between two vectors of the same size, it is 13571 // a bitcast, not a conversion. 13572 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13573 return; 13574 13575 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 13576 } 13577 13578 if (TargetBT && TargetBT->isVLSTBuiltinType()) 13579 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 13580 13581 // If the source is floating point... 13582 if (SourceBT && SourceBT->isFloatingPoint()) { 13583 // ...and the target is floating point... 13584 if (TargetBT && TargetBT->isFloatingPoint()) { 13585 // ...then warn if we're dropping FP rank. 
13586 13587 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13588 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13589 if (Order > 0) { 13590 // Don't warn about float constants that are precisely 13591 // representable in the target type. 13592 Expr::EvalResult result; 13593 if (E->EvaluateAsRValue(result, S.Context)) { 13594 // Value might be a float, a float vector, or a float complex. 13595 if (IsSameFloatAfterCast(result.Val, 13596 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13597 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13598 return; 13599 } 13600 13601 if (S.SourceMgr.isInSystemMacro(CC)) 13602 return; 13603 13604 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13605 } 13606 // ... or possibly if we're increasing rank, too 13607 else if (Order < 0) { 13608 if (S.SourceMgr.isInSystemMacro(CC)) 13609 return; 13610 13611 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13612 } 13613 return; 13614 } 13615 13616 // If the target is integral, always warn. 13617 if (TargetBT && TargetBT->isInteger()) { 13618 if (S.SourceMgr.isInSystemMacro(CC)) 13619 return; 13620 13621 DiagnoseFloatingImpCast(S, E, T, CC); 13622 } 13623 13624 // Detect the case where a call result is converted from floating-point to 13625 // to bool, and the final argument to the call is converted from bool, to 13626 // discover this typo: 13627 // 13628 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13629 // 13630 // FIXME: This is an incredibly special case; is there some more general 13631 // way to detect this class of misplaced-parentheses bug? 13632 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13633 // Check last argument of function call to see if it is an 13634 // implicit cast from a type matching the type the result 13635 // is being cast to. 
13636 CallExpr *CEx = cast<CallExpr>(E); 13637 if (unsigned NumArgs = CEx->getNumArgs()) { 13638 Expr *LastA = CEx->getArg(NumArgs - 1); 13639 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13640 if (isa<ImplicitCastExpr>(LastA) && 13641 InnerE->getType()->isBooleanType()) { 13642 // Warn on this floating-point to bool conversion 13643 DiagnoseImpCast(S, E, T, CC, 13644 diag::warn_impcast_floating_point_to_bool); 13645 } 13646 } 13647 } 13648 return; 13649 } 13650 13651 // Valid casts involving fixed point types should be accounted for here. 13652 if (Source->isFixedPointType()) { 13653 if (Target->isUnsaturatedFixedPointType()) { 13654 Expr::EvalResult Result; 13655 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13656 S.isConstantEvaluated())) { 13657 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13658 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13659 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13660 if (Value > MaxVal || Value < MinVal) { 13661 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13662 S.PDiag(diag::warn_impcast_fixed_point_range) 13663 << Value.toString() << T 13664 << E->getSourceRange() 13665 << clang::SourceRange(CC)); 13666 return; 13667 } 13668 } 13669 } else if (Target->isIntegerType()) { 13670 Expr::EvalResult Result; 13671 if (!S.isConstantEvaluated() && 13672 E->EvaluateAsFixedPoint(Result, S.Context, 13673 Expr::SE_AllowSideEffects)) { 13674 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13675 13676 bool Overflowed; 13677 llvm::APSInt IntResult = FXResult.convertToInt( 13678 S.Context.getIntWidth(T), 13679 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13680 13681 if (Overflowed) { 13682 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13683 S.PDiag(diag::warn_impcast_fixed_point_range) 13684 << FXResult.toString() << T 13685 << E->getSourceRange() 13686 << clang::SourceRange(CC)); 13687 return; 13688 } 13689 } 13690 } 13691 } else if 
(Target->isUnsaturatedFixedPointType()) { 13692 if (Source->isIntegerType()) { 13693 Expr::EvalResult Result; 13694 if (!S.isConstantEvaluated() && 13695 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13696 llvm::APSInt Value = Result.Val.getInt(); 13697 13698 bool Overflowed; 13699 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13700 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13701 13702 if (Overflowed) { 13703 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13704 S.PDiag(diag::warn_impcast_fixed_point_range) 13705 << toString(Value, /*Radix=*/10) << T 13706 << E->getSourceRange() 13707 << clang::SourceRange(CC)); 13708 return; 13709 } 13710 } 13711 } 13712 } 13713 13714 // If we are casting an integer type to a floating point type without 13715 // initialization-list syntax, we might lose accuracy if the floating 13716 // point type has a narrower significand than the integer type. 13717 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13718 TargetBT->isFloatingType() && !IsListInit) { 13719 // Determine the number of precision bits in the source integer type. 13720 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13721 /*Approximate*/ true); 13722 unsigned int SourcePrecision = SourceRange.Width; 13723 13724 // Determine the number of precision bits in the 13725 // target floating point type. 13726 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13727 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13728 13729 if (SourcePrecision > 0 && TargetPrecision > 0 && 13730 SourcePrecision > TargetPrecision) { 13731 13732 if (Optional<llvm::APSInt> SourceInt = 13733 E->getIntegerConstantExpr(S.Context)) { 13734 // If the source integer is a constant, convert it to the target 13735 // floating point type. Issue a warning if the value changes 13736 // during the whole conversion. 
13737 llvm::APFloat TargetFloatValue( 13738 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13739 llvm::APFloat::opStatus ConversionStatus = 13740 TargetFloatValue.convertFromAPInt( 13741 *SourceInt, SourceBT->isSignedInteger(), 13742 llvm::APFloat::rmNearestTiesToEven); 13743 13744 if (ConversionStatus != llvm::APFloat::opOK) { 13745 SmallString<32> PrettySourceValue; 13746 SourceInt->toString(PrettySourceValue, 10); 13747 SmallString<32> PrettyTargetValue; 13748 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13749 13750 S.DiagRuntimeBehavior( 13751 E->getExprLoc(), E, 13752 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13753 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13754 << E->getSourceRange() << clang::SourceRange(CC)); 13755 } 13756 } else { 13757 // Otherwise, the implicit conversion may lose precision. 13758 DiagnoseImpCast(S, E, T, CC, 13759 diag::warn_impcast_integer_float_precision); 13760 } 13761 } 13762 } 13763 13764 DiagnoseNullConversion(S, E, T, CC); 13765 13766 S.DiscardMisalignedMemberAddress(Target, E); 13767 13768 if (Target->isBooleanType()) 13769 DiagnoseIntInBoolContext(S, E); 13770 13771 if (!Source->isIntegerType() || !Target->isIntegerType()) 13772 return; 13773 13774 // TODO: remove this early return once the false positives for constant->bool 13775 // in templates, macros, etc, are reduced or removed. 
13776 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13777 return; 13778 13779 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13780 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13781 return adornObjCBoolConversionDiagWithTernaryFixit( 13782 S, E, 13783 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13784 << E->getType()); 13785 } 13786 13787 IntRange SourceTypeRange = 13788 IntRange::forTargetOfCanonicalType(S.Context, Source); 13789 IntRange LikelySourceRange = 13790 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13791 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13792 13793 if (LikelySourceRange.Width > TargetRange.Width) { 13794 // If the source is a constant, use a default-on diagnostic. 13795 // TODO: this should happen for bitfield stores, too. 13796 Expr::EvalResult Result; 13797 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13798 S.isConstantEvaluated())) { 13799 llvm::APSInt Value(32); 13800 Value = Result.Val.getInt(); 13801 13802 if (S.SourceMgr.isInSystemMacro(CC)) 13803 return; 13804 13805 std::string PrettySourceValue = toString(Value, 10); 13806 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13807 13808 S.DiagRuntimeBehavior( 13809 E->getExprLoc(), E, 13810 S.PDiag(diag::warn_impcast_integer_precision_constant) 13811 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13812 << E->getSourceRange() << SourceRange(CC)); 13813 return; 13814 } 13815 13816 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
13817 if (S.SourceMgr.isInSystemMacro(CC)) 13818 return; 13819 13820 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13821 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13822 /* pruneControlFlow */ true); 13823 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13824 } 13825 13826 if (TargetRange.Width > SourceTypeRange.Width) { 13827 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13828 if (UO->getOpcode() == UO_Minus) 13829 if (Source->isUnsignedIntegerType()) { 13830 if (Target->isUnsignedIntegerType()) 13831 return DiagnoseImpCast(S, E, T, CC, 13832 diag::warn_impcast_high_order_zero_bits); 13833 if (Target->isSignedIntegerType()) 13834 return DiagnoseImpCast(S, E, T, CC, 13835 diag::warn_impcast_nonnegative_result); 13836 } 13837 } 13838 13839 if (TargetRange.Width == LikelySourceRange.Width && 13840 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13841 Source->isSignedIntegerType()) { 13842 // Warn when doing a signed to signed conversion, warn if the positive 13843 // source value is exactly the width of the target type, which will 13844 // cause a negative value to be stored. 13845 13846 Expr::EvalResult Result; 13847 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13848 !S.SourceMgr.isInSystemMacro(CC)) { 13849 llvm::APSInt Value = Result.Val.getInt(); 13850 if (isSameWidthConstantConversion(S, E, T, CC)) { 13851 std::string PrettySourceValue = toString(Value, 10); 13852 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13853 13854 S.DiagRuntimeBehavior( 13855 E->getExprLoc(), E, 13856 S.PDiag(diag::warn_impcast_integer_precision_constant) 13857 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13858 << E->getSourceRange() << SourceRange(CC)); 13859 return; 13860 } 13861 } 13862 13863 // Fall through for non-constants to give a sign conversion warning. 
13864 } 13865 13866 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) && 13867 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13868 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13869 LikelySourceRange.Width == TargetRange.Width))) { 13870 if (S.SourceMgr.isInSystemMacro(CC)) 13871 return; 13872 13873 unsigned DiagID = diag::warn_impcast_integer_sign; 13874 13875 // Traditionally, gcc has warned about this under -Wsign-compare. 13876 // We also want to warn about it in -Wconversion. 13877 // So if -Wconversion is off, use a completely identical diagnostic 13878 // in the sign-compare group. 13879 // The conditional-checking code will 13880 if (ICContext) { 13881 DiagID = diag::warn_impcast_integer_sign_conditional; 13882 *ICContext = true; 13883 } 13884 13885 return DiagnoseImpCast(S, E, T, CC, DiagID); 13886 } 13887 13888 // Diagnose conversions between different enumeration types. 13889 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13890 // type, to give us better diagnostics. 
  // (Tail of CheckImplicitConversion.) Diagnose conversions between two
  // different named enumeration types.
  QualType SourceType = E->getType();
  if (!S.getLangOpts().CPlusPlus) {
    // In C, pretend that the type of an EnumConstantDecl is its enumeration
    // type rather than the enum's integer type, to give better diagnostics.
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
        EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
        SourceType = S.Context.getTypeDeclType(Enum);
        Source = S.Context.getCanonicalType(SourceType).getTypePtr();
      }
  }

  // Only warn for named (linkage-visible) enums; anonymous enums are commonly
  // used as plain integer constants.
  if (const EnumType *SourceEnum = Source->getAs<EnumType>())
    if (const EnumType *TargetEnum = Target->getAs<EnumType>())
      if (SourceEnum->getDecl()->hasNameForLinkage() &&
          TargetEnum->getDecl()->hasNameForLinkage() &&
          SourceEnum != TargetEnum) {
        // Conversions spelled inside a system macro are not actionable.
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        return DiagnoseImpCast(S, E, SourceType, T, CC,
                               diag::warn_impcast_different_enum_types);
      }
}

static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T);

/// Analyze one arm of a conditional operator whose result is converted to
/// type \p T, recursing into nested conditionals. \p ICContext is set when a
/// suspicious signedness conversion is found in this operand.
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC, bool &ICContext) {
  E = E->IgnoreParenImpCasts();

  // A nested conditional is analyzed arm-by-arm rather than as a whole.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
    return CheckConditionalOperator(S, CO, CC, T);

  AnalyzeImplicitConversions(S, E, CC);
  if (E->getType() != T)
    return CheckImplicitConversion(S, E, T, CC, &ICContext);
}

/// Analyze an implicit conversion of a conditional operator's result to
/// type \p T, treating each arm as if it were converted directly.
static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  // For a GNU binary conditional (x ?: y), the "true" arm is the common
  // operand shared with the condition.
  Expr *TrueExpr = E->getTrueExpr();
  if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
    TrueExpr = BCO->getCommon();

  bool Suspicious = false;
  CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  if (T->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}

/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  // If the language has a real 'bool' type, the ordinary implicit-conversion
  // analysis already covers the conversion; nothing extra to do here.
  if (S.getLangOpts().Bool)
    return;
  // Conversions from atomic types are not checked here.
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}

namespace {
/// A deferred subexpression to be visited by the data-recursive
/// AnalyzeImplicitConversions walk below.
struct AnalyzeImplicitConversionsWorkItem {
  Expr *E;           // subexpression to analyze
  SourceLocation CC; // "context" location of the implicit conversion
  bool IsListInit;   // are we inside a C++ list-initialization?
};
}

/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
/// that should be visited are added to WorkList.
static void AnalyzeImplicitConversions(
    Sema &S, AnalyzeImplicitConversionsWorkItem Item,
    llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
  Expr *OrigE = Item.E;
  SourceLocation CC = Item.CC;

  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Propagate whether we are in a C++ list initialization expression.
  // If so, we do not issue warnings for implicit int-float conversion
  // precision loss, because C++11 narrowing already handles it.
  bool IsListInit = Item.IsListInit ||
                    (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);

  if (E->isTypeDependent() || E->isValueDependent())
    return;

  Expr *SourceExpr = E;
  // Examine, but don't traverse into the source expression of an
  // OpaqueValueExpr, since it may have multiple parents and we don't want to
  // emit duplicate diagnostics. It's fine to examine the form or attempt to
  // evaluate it in the context of checking the specific conversion to T though.
  if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    if (auto *Src = OVE->getSourceExpr())
      SourceExpr = Src;

  // Warn on bitwise negation '~b' of a boolean value: almost certainly a
  // typo for logical negation '!b'.
  if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
    if (UO->getOpcode() == UO_Not &&
        UO->getSubExpr()->isKnownToHaveBooleanValue())
      S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
          << OrigE->getSourceRange() << T->isBooleanType()
          << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");

  // Warn on bitwise '&'/'|' applied to boolean operands when both sides have
  // side effects: the user probably meant the logical '&&'/'||'.
  if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
    if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
        BO->getLHS()->isKnownToHaveBooleanValue() &&
        BO->getRHS()->isKnownToHaveBooleanValue() &&
        BO->getLHS()->HasSideEffects(S.Context) &&
        BO->getRHS()->HasSideEffects(S.Context)) {
      S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
          << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
          << FixItHint::CreateReplacement(
                 BO->getOperatorLoc(),
                 (BO->getOpcode() == BO_And ? "&&" : "||"));
      S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
    }

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (SourceExpr->getType() != T)
    CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    // A non-void explicit cast of an atomic operand reads the atomic.
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    WorkList.push_back({E, CC, IsListInit});
    return;
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below.  Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly would produce duplicate diagnostics.
        continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    WorkList.push_back({ChildExpr, CC, IsListInit});
  }

  // Check the operands of logical '&&' / '||' for bool-like conversions,
  // again skipping string-literal operands of '&&' (the assert pattern).
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      // '!' places its operand in a boolean context.
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      // Any other unary operator (except '&') reads its atomic operand.
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression. There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit/*= false*/) {
  // Iterative worklist driver for the data-recursive worker above; avoids
  // unbounded native recursion on deeply nested expressions.
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
14158 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 14159 return false; 14160 } 14161 14162 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 14163 // Returns true when emitting a warning about taking the address of a reference. 14164 static bool CheckForReference(Sema &SemaRef, const Expr *E, 14165 const PartialDiagnostic &PD) { 14166 E = E->IgnoreParenImpCasts(); 14167 14168 const FunctionDecl *FD = nullptr; 14169 14170 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 14171 if (!DRE->getDecl()->getType()->isReferenceType()) 14172 return false; 14173 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 14174 if (!M->getMemberDecl()->getType()->isReferenceType()) 14175 return false; 14176 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 14177 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 14178 return false; 14179 FD = Call->getDirectCallee(); 14180 } else { 14181 return false; 14182 } 14183 14184 SemaRef.Diag(E->getExprLoc(), PD); 14185 14186 // If possible, point to location of function. 14187 if (FD) { 14188 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 14189 } 14190 14191 return true; 14192 } 14193 14194 // Returns true if the SourceLocation is expanded from any macro body. 14195 // Returns false if the SourceLocation is invalid, is from not in a macro 14196 // expansion, or is from expanded from a top-level macro argument. 14197 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 14198 if (Loc.isInvalid()) 14199 return false; 14200 14201 while (Loc.isMacroID()) { 14202 if (SM.isMacroBodyExpansion(Loc)) 14203 return true; 14204 Loc = SM.getImmediateMacroCallerLoc(Loc); 14205 } 14206 14207 return false; 14208 } 14209 14210 /// Diagnose pointers that are always non-null. 
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  // NPCK_NotNull means E is being converted to bool; anything else means E is
  // being compared against a null pointer constant.
  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' is never null.
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  // Strip a top-level address-of; any other unary operator is not handled.
  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // Taking the address of a reference can never yield null.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Emits the main warning plus a note pointing at the nonnull/returns_nonnull
  // attribute that justifies it.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    // Skip parameters recorded as modified in the current function; they may
    // legitimately have been reassigned to null.
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // The attribute may instead live on the enclosing function and name
      // this parameter by index.
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        // Find this parameter's position within the function's parameters.
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          // A nonnull attribute with no argument list covers every pointer
          // parameter of the function.
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          // Otherwise, warn only if this parameter's index is listed.
          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  // Select which flavor of entity the diagnostic should name.
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here. If there is a null constant, only suggest
    // for a pointer return type. If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression). Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e.
/// the location of the syntactic entity requiring the implicit
/// conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
14414 if (E->isTypeDependent() || E->isValueDependent()) 14415 return; 14416 14417 // Check for array bounds violations in cases where the check isn't triggered 14418 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 14419 // ArraySubscriptExpr is on the RHS of a variable initialization. 14420 CheckArrayAccess(E); 14421 14422 // This is not the right CC for (e.g.) a variable initialization. 14423 AnalyzeImplicitConversions(*this, E, CC); 14424 } 14425 14426 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 14427 /// Input argument E is a logical expression. 14428 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 14429 ::CheckBoolLikeConversion(*this, E, CC); 14430 } 14431 14432 /// Diagnose when expression is an integer constant expression and its evaluation 14433 /// results in integer overflow 14434 void Sema::CheckForIntOverflow (Expr *E) { 14435 // Use a work list to deal with nested struct initializers. 14436 SmallVector<Expr *, 2> Exprs(1, E); 14437 14438 do { 14439 Expr *OriginalE = Exprs.pop_back_val(); 14440 Expr *E = OriginalE->IgnoreParenCasts(); 14441 14442 if (isa<BinaryOperator>(E)) { 14443 E->EvaluateForOverflow(Context); 14444 continue; 14445 } 14446 14447 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 14448 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 14449 else if (isa<ObjCBoxedExpr>(OriginalE)) 14450 E->EvaluateForOverflow(Context); 14451 else if (auto Call = dyn_cast<CallExpr>(E)) 14452 Exprs.append(Call->arg_begin(), Call->arg_end()); 14453 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 14454 Exprs.append(Message->arg_begin(), Message->arg_end()); 14455 } while (!Exprs.empty()); 14456 } 14457 14458 namespace { 14459 14460 /// Visitor for expressions which looks for unsequenced operations on the 14461 /// same object. 
class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
  using Base = ConstEvaluatedExprVisitor<SequenceChecker>;

  /// A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendent of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
  class SequenceTree {
    struct Value {
      explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
      unsigned Parent : 31;
      unsigned Merged : 1;
    };
    SmallVector<Value, 8> Values;

  public:
    /// A region within an expression which may be sequenced with respect
    /// to some other region.
    class Seq {
      friend class SequenceTree;

      // Index into SequenceTree::Values; 0 is the root region.
      unsigned Index;

      explicit Seq(unsigned N) : Index(N) {}

    public:
      Seq() : Index(0) {}
    };

    SequenceTree() { Values.push_back(Value(0)); }
    Seq root() const { return Seq(0); }

    /// Create a new sequence of operations, which is an unsequenced
    /// subset of \p Parent. This sequence of operations is sequenced with
    /// respect to other children of \p Parent.
    Seq allocate(Seq Parent) {
      Values.push_back(Value(Parent.Index));
      return Seq(Values.size() - 1);
    }

    /// Merge a sequence of operations into its parent.
    void merge(Seq S) {
      Values[S.Index].Merged = true;
    }

    /// Determine whether two operations are unsequenced. This operation
    /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
    /// should have been merged into its parent as appropriate.
    bool isUnsequenced(Seq Cur, Seq Old) {
      // Walk up from Cur's representative; Old is unsequenced with Cur iff
      // Old's representative is an ancestor of (or equal to) Cur's.
      unsigned C = representative(Cur.Index);
      unsigned Target = representative(Old.Index);
      while (C >= Target) {
        if (C == Target)
          return true;
        C = Values[C].Parent;
      }
      return false;
    }

  private:
    /// Pick a representative for a sequence.
    unsigned representative(unsigned K) {
      if (Values[K].Merged)
        // Perform path compression as we go.
        return Values[K].Parent = representative(Values[K].Parent);
      return K;
    }
  };

  /// An object for which we can track unsequenced uses.
  using Object = const NamedDecl *;

  /// Different flavors of object usage which we track. We only track the
  /// least-sequenced usage of each kind.
  enum UsageKind {
    /// A read of an object. Multiple unsequenced reads are OK.
    UK_Use,

    /// A modification of an object which is sequenced before the value
    /// computation of the expression, such as ++n in C++.
    UK_ModAsValue,

    /// A modification of an object which is not sequenced before the value
    /// computation of the expression, such as n++.
    UK_ModAsSideEffect,

    UK_Count = UK_ModAsSideEffect + 1
  };

  /// Bundle together a sequencing region and the expression corresponding
  /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
  struct Usage {
    const Expr *UsageExpr;
    SequenceTree::Seq Seq;

    Usage() : UsageExpr(nullptr) {}
  };

  struct UsageInfo {
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed;

    UsageInfo() : Diagnosed(false) {}
  };
  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
      : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      Self.EvalTracker = Prev;
      // Propagate a failed evaluation outward so enclosing trackers also
      // stop trying to constant-fold.
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false when we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    // The diagnostic always points at the modification; swap so that the
    // recorded usage (a use) becomes the secondary location when needed.
    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modifies an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operation sequences modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which records usages which are modifications as side effect, and then
  // downgrades them (or more accurately restores the previous usage which was
  // a modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCastExpr(const CastExpr *E) {
    // An lvalue-to-rvalue conversion is a read of the underlying object.
    Object O = Object();
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  /// Visit two expressions where the first is sequenced before the second:
  /// each gets its own region, and both regions are merged back afterwards.
  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //  Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //  the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //  The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //  [...] the assignment is sequenced after the value computation
    //  of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //  [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      // A compound assignment also reads the LHS.
      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //  the assignment is sequenced [...] before the value computation of the
    //  assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    // Post-increment/decrement is a modification as side effect: it is not
    // sequenced before the value computation of the full expression.
    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //  [...] the second operand is not evaluated if the first operand
    //  evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //  [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //  [...] Every value computation and side effect associated with the first
    //  expression is sequenced before every value computation and side effect
    //  associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visitation
    // is done. (For example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y".)
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    // [...] The first expression is contextually converted to bool (Clause 4).
    // It is evaluated and if it is true, the result of the conditional
    // expression is the value of the second expression, otherwise that of the
    // third expression. Only one of the second and third expressions is
    // evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      /// No known sequencing.
      NoSequencing,

      /// The LHS is sequenced before the RHS.
      LHSBeforeRHS,

      /// The RHS is sequenced before the LHS.
      RHSBeforeLHS,

      /// The LHS is sequenced before the rest of the function arguments.
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
15290 SmallVector<SequenceTree::Seq, 32> Elts; 15291 SequenceTree::Seq Parent = Region; 15292 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 15293 const Expr *E = ILE->getInit(I); 15294 if (!E) 15295 continue; 15296 Region = Tree.allocate(Parent); 15297 Elts.push_back(Region); 15298 Visit(E); 15299 } 15300 15301 // Forget that the initializers are sequenced. 15302 Region = Parent; 15303 for (unsigned I = 0; I < Elts.size(); ++I) 15304 Tree.merge(Elts[I]); 15305 } 15306 }; 15307 15308 } // namespace 15309 15310 void Sema::CheckUnsequencedOperations(const Expr *E) { 15311 SmallVector<const Expr *, 8> WorkList; 15312 WorkList.push_back(E); 15313 while (!WorkList.empty()) { 15314 const Expr *Item = WorkList.pop_back_val(); 15315 SequenceChecker(*this, Item, WorkList); 15316 } 15317 } 15318 15319 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 15320 bool IsConstexpr) { 15321 llvm::SaveAndRestore<bool> ConstantContext( 15322 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 15323 CheckImplicitConversions(E, CheckLoc); 15324 if (!E->isInstantiationDependent()) 15325 CheckUnsequencedOperations(E); 15326 if (!IsConstexpr && !E->isValueDependent()) 15327 CheckForIntOverflow(E); 15328 DiagnoseMisalignedMembers(); 15329 } 15330 15331 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 15332 FieldDecl *BitField, 15333 Expr *Init) { 15334 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 15335 } 15336 15337 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 15338 SourceLocation Loc) { 15339 if (!PType->isVariablyModifiedType()) 15340 return; 15341 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 15342 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 15343 return; 15344 } 15345 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 15346 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 15347 return; 15348 } 15349 if (const auto *ParenTy = 
          dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  if (AT->getSizeModifier() != ArrayType::Star) {
    // [*] may appear in a nested array dimension; keep recursing.
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
///
/// \returns true if any parameter was marked invalid.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // This is also C++ [dcl.fct]p6.
    if (!Param->isInvalidDecl() &&
        RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type)) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C2x)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
    }

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }
  }

  return HasInvalidParm;
}

// Forward declaration; defined below (mutually recursive with the lvalue
// variant).
Optional<std::pair<CharUnits, CharUnits>>
static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);

/// Compute the alignment and offset of the base class object given the
/// derived-to-base cast expression and the alignment and offset of the derived
/// class object.
static std::pair<CharUnits, CharUnits>
getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
                                   CharUnits BaseAlignment, CharUnits Offset,
                                   ASTContext &Ctx) {
  for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
       ++PathI) {
    const CXXBaseSpecifier *Base = *PathI;
    const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
    if (Base->isVirtual()) {
      // The complete object may have a lower alignment than the non-virtual
      // alignment of the base, in which case the base may be misaligned. Choose
      // the smaller of the non-virtual alignment and BaseAlignment, which is a
      // conservative lower bound of the complete object alignment.
      CharUnits NonVirtualAlignment =
          Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
      BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
      Offset = CharUnits::Zero();
    } else {
      const ASTRecordLayout &RL =
          Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
      Offset += RL.getBaseClassOffset(BaseDecl);
    }
    DerivedType = Base->getType();
  }

  return std::make_pair(BaseAlignment, Offset);
}

/// Compute the alignment and offset of a binary additive operator.
/// Returns None if the pointee size or the pointer's base alignment cannot be
/// determined.
static Optional<std::pair<CharUnits, CharUnits>>
getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
                                     bool IsSub, ASTContext &Ctx) {
  QualType PointeeType = PtrE->getType()->getPointeeType();

  if (!PointeeType->isConstantSizeType())
    return llvm::None;

  auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);

  if (!P)
    return llvm::None;

  CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
  if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
    CharUnits Offset = EltSize * IdxRes->getExtValue();
    if (IsSub)
      Offset = -Offset;
    return std::make_pair(P->first, P->second + Offset);
  }

  // If the integer expression isn't a constant expression, compute the lower
  // bound of the alignment using the alignment and offset of the pointer
  // expression and the element size.
  return std::make_pair(
      P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
      CharUnits::Zero());
}

/// This helper function takes an lvalue expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
15513 Optional<std::pair<CharUnits, CharUnits>> 15514 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15515 E = E->IgnoreParens(); 15516 switch (E->getStmtClass()) { 15517 default: 15518 break; 15519 case Stmt::CStyleCastExprClass: 15520 case Stmt::CXXStaticCastExprClass: 15521 case Stmt::ImplicitCastExprClass: { 15522 auto *CE = cast<CastExpr>(E); 15523 const Expr *From = CE->getSubExpr(); 15524 switch (CE->getCastKind()) { 15525 default: 15526 break; 15527 case CK_NoOp: 15528 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15529 case CK_UncheckedDerivedToBase: 15530 case CK_DerivedToBase: { 15531 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15532 if (!P) 15533 break; 15534 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15535 P->second, Ctx); 15536 } 15537 } 15538 break; 15539 } 15540 case Stmt::ArraySubscriptExprClass: { 15541 auto *ASE = cast<ArraySubscriptExpr>(E); 15542 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15543 false, Ctx); 15544 } 15545 case Stmt::DeclRefExprClass: { 15546 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15547 // FIXME: If VD is captured by copy or is an escaping __block variable, 15548 // use the alignment of VD's type. 
15549 if (!VD->getType()->isReferenceType()) 15550 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15551 if (VD->hasInit()) 15552 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15553 } 15554 break; 15555 } 15556 case Stmt::MemberExprClass: { 15557 auto *ME = cast<MemberExpr>(E); 15558 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15559 if (!FD || FD->getType()->isReferenceType() || 15560 FD->getParent()->isInvalidDecl()) 15561 break; 15562 Optional<std::pair<CharUnits, CharUnits>> P; 15563 if (ME->isArrow()) 15564 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15565 else 15566 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15567 if (!P) 15568 break; 15569 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15570 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15571 return std::make_pair(P->first, 15572 P->second + CharUnits::fromQuantity(Offset)); 15573 } 15574 case Stmt::UnaryOperatorClass: { 15575 auto *UO = cast<UnaryOperator>(E); 15576 switch (UO->getOpcode()) { 15577 default: 15578 break; 15579 case UO_Deref: 15580 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15581 } 15582 break; 15583 } 15584 case Stmt::BinaryOperatorClass: { 15585 auto *BO = cast<BinaryOperator>(E); 15586 auto Opcode = BO->getOpcode(); 15587 switch (Opcode) { 15588 default: 15589 break; 15590 case BO_Comma: 15591 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15592 } 15593 break; 15594 } 15595 } 15596 return llvm::None; 15597 } 15598 15599 /// This helper function takes a pointer expression and returns the alignment of 15600 /// a VarDecl and a constant offset from the VarDecl. 
Optional<std::pair<CharUnits, CharUnits>>
static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      // The decayed pointer has the alignment of the array lvalue.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' points at an object aligned at least to the class's non-virtual
    // alignment.
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      // '&lvalue': the pointer's alignment is the lvalue's alignment.
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      // For 'int + ptr', normalize so LHS is the pointer operand.
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return llvm::None;
}

/// Return a conservative lower bound for the alignment of the pointer value
/// \p E.
static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
  // See if we can compute the alignment of a VarDecl and an offset from it.
  Optional<std::pair<CharUnits, CharUnits>> P =
      getBaseAlignmentAndOffsetFromPtr(E, S.Context);

  if (P)
    return P->first.alignmentAtOffset(P->second);

  // If that failed, return the type's alignment.
  return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}

/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*. We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  // Only warn when the cast strictly increases the alignment requirement.
  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
    << Op->getType() << T
    << static_cast<unsigned>(SrcAlign.getQuantity())
    << static_cast<unsigned>(DestAlign.getQuantity())
    << TRange << Op->getSourceRange();
}

/// Check whether this array fits the idiom of a size-one tail padded
/// array member of a struct.
///
/// We avoid emitting out-of-bounds access warnings for such arrays as they are
/// commonly used to emulate flexible arrays in C89 code.
static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
                                    const NamedDecl *ND) {
  if (Size != 1 || !ND) return false;

  const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
  if (!FD) return false;

  // Don't consider sizes resulting from macro expansions or template argument
  // substitution to form C89 tail-padded arrays.

  TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
  while (TInfo) {
    TypeLoc TL = TInfo->getTypeLoc();
    // Look through typedefs.
    if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
      const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
      TInfo = TDL->getTypeSourceInfo();
      continue;
    }
    if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
      // The size must be a plain literal written in the source (not a macro
      // expansion) for the C89 idiom to apply.
      const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
      if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
        return false;
    }
    break;
  }

  const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
  if (!RD) return false;
  if (RD->isUnion()) return false;
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CRD->isStandardLayout()) return false;
  }

  // See if this is the last field decl in the record.
  const Decl *D = FD;
  while ((D = D->getNextDeclInContext()))
    if (isa<FieldDecl>(D))
      return false;
  return true;
}

/// Diagnose out-of-bounds constant array indices / pointer arithmetic on
/// \p BaseExpr with index \p IndexExpr. \p ASE is the subscript expression if
/// this came from one (null for raw pointer arithmetic).
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluated())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  const Type *BaseType =
      ArrayTy == nullptr ?
                   nullptr : ArrayTy->getElementType().getTypePtr();
  bool IsUnboundedArray = (BaseType == nullptr);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    // Caller stripped a unary minus; restore the real (negated) index value.
    index.setIsUnsigned(false);
    index = -index;
  }

  const NamedDecl *ND = nullptr;
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      const auto &ASTC = getASTContext();
      unsigned AddrBits =
          ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace(
              EffectiveType->getCanonicalTypeInternal()));
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      Optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits)
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about. Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits)
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      if (!ND) {
        // Try harder to find a NamedDecl to point at in the note.
        while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
          BaseExpr = ASE->getBase()->IgnoreParenCasts();
        if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
          ND = DRE->getDecl();
        if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
          ND = ME->getMemberDecl();
      }

      if (ND)
        DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                            PDiag(diag::note_array_declared_here) << ND);
    }
    return;
  }

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();
    if (!size.isStrictlyPositive())
      return;

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to size
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);
      // Handle ptrarith_typesize being zero, such as when casting to void*
      if (!ptrarith_typesize) ptrarith_typesize = 1;
      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved
        uint64_t ratio = array_typesize / ptrarith_typesize;
        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Widen to a common bit-width before comparing.
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Also don't warn for arrays of size 1 which are members of some
    // structure. These are often used to approximate flexible arrays in C89
    // code.
    if (IsTailPaddedMemberArray(*this, size, ND))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = ASE ?
                            diag::warn_array_index_exceeds_bounds
                          : diag::warn_ptr_arith_exceeds_bounds;

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << toString(size, 10, true)
                                      << (unsigned)size.getLimitedValue(~0U)
                                      << IndexExpr->getSourceRange());
  } else {
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      // The pointer-arith diagnostic reports the magnitude of the offset.
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << IndexExpr->getSourceRange());
  }

  if (!ND) {
    // Try harder to find a NamedDecl to point at in the note.
    while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
      BaseExpr = ASE->getBase()->IgnoreParenCasts();
    if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
      ND = DRE->getDecl();
    if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
      ND = ME->getMemberDecl();
  }

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_declared_here) << ND);
}

/// Walk \p expr looking through member accesses, unary * / &, conditionals,
/// and overloaded-operator arguments, checking each array subscript found.
void Sema::CheckArrayAccess(const Expr *expr) {
  // Tracks how many levels of '&' (minus levels of '*') wrap the subscript;
  // positive means '&arr[size]' style one-past-the-end is allowed.
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
      case Stmt::ArraySubscriptExprClass: {
        const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
        CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                         AllowOnePastEnd > 0);
        expr = ASE->getBase();
        break;
      }
      case Stmt::MemberExprClass: {
        expr = cast<MemberExpr>(expr)->getBase();
        break;
      }
      case Stmt::OMPArraySectionExprClass: {
        const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
        if (ASE->getLowerBound())
          CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                           /*ASE=*/nullptr, AllowOnePastEnd > 0);
        return;
      }
      case Stmt::UnaryOperatorClass: {
        // Only unwrap the * and & unary operators
        const UnaryOperator *UO = cast<UnaryOperator>(expr);
        expr = UO->getSubExpr();
        switch (UO->getOpcode()) {
          case UO_AddrOf:
            AllowOnePastEnd++;
            break;
          case UO_Deref:
            AllowOnePastEnd--;
            break;
          default:
            return;
        }
        break;
      }
      case Stmt::ConditionalOperatorClass: {
        const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
        if (const Expr *lhs = cond->getLHS())
          CheckArrayAccess(lhs);
        if (const Expr *rhs = cond->getRHS())
          CheckArrayAccess(rhs);
        return;
      }
      case Stmt::CXXOperatorCallExprClass: {
        const auto *OCE = cast<CXXOperatorCallExpr>(expr);
        for (const auto *Arg : OCE->arguments())
          CheckArrayAccess(Arg);
        return;
      }
      default:
        return;
    }
  }
}

//===--- CHECK: Objective-C retain cycles ----------------------------------//

namespace {

/// Describes the variable (and source location) that would own a retained
/// block, for -Warc-retain-cycles diagnostics.
struct RetainCycleOwner {
  VarDecl *Variable = nullptr;
  SourceRange Range;
  SourceLocation Loc;
  // True if ownership is reached through an ivar/property rather than the
  // variable itself.
  bool Indirect = false;

  RetainCycleOwner() = default;

  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace

/// Consider whether capturing the given variable can possibly lead to
/// a retain cycle.
static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
  // In ARC, it's captured strongly iff the variable has __strong
  // lifetime. In MRR, it's captured strongly if the variable is
  // __block and has an appropriate type.
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}

/// Walk \p e to find a strongly-retained variable (direct or through strong
/// ivars/properties) that would own a block assigned to it. Returns true and
/// fills \p owner on success.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        // These casts don't change ownership; look through them.
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      // Only retaining properties (or ones backed by a __strong ivar) can
      // participate in a cycle.
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
        return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}

namespace {

/// Searches a block body for an expression that captures \p Variable, while
/// also noticing whether the variable is re-assigned to zero/nil (which
/// breaks the would-be cycle).
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  ASTContext &Context;
  VarDecl *Variable;
  Expr *Capturer = nullptr;
  bool VarWillBeReased = false;

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Context(Context), Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    // Record only the first capturing reference found.
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
Visit(OVE->getSourceExpr()); 16181 } 16182 16183 void VisitBinaryOperator(BinaryOperator *BinOp) { 16184 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 16185 return; 16186 Expr *LHS = BinOp->getLHS(); 16187 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 16188 if (DRE->getDecl() != Variable) 16189 return; 16190 if (Expr *RHS = BinOp->getRHS()) { 16191 RHS = RHS->IgnoreParenCasts(); 16192 Optional<llvm::APSInt> Value; 16193 VarWillBeReased = 16194 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 16195 *Value == 0); 16196 } 16197 } 16198 } 16199 }; 16200 16201 } // namespace 16202 16203 /// Check whether the given argument is a block which captures a 16204 /// variable. 16205 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 16206 assert(owner.Variable && owner.Loc.isValid()); 16207 16208 e = e->IgnoreParenCasts(); 16209 16210 // Look through [^{...} copy] and Block_copy(^{...}). 16211 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 16212 Selector Cmd = ME->getSelector(); 16213 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 16214 e = ME->getInstanceReceiver(); 16215 if (!e) 16216 return nullptr; 16217 e = e->IgnoreParenCasts(); 16218 } 16219 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 16220 if (CE->getNumArgs() == 1) { 16221 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 16222 if (Fn) { 16223 const IdentifierInfo *FnI = Fn->getIdentifier(); 16224 if (FnI && FnI->isStr("_Block_copy")) { 16225 e = CE->getArg(0)->IgnoreParenCasts(); 16226 } 16227 } 16228 } 16229 } 16230 16231 BlockExpr *block = dyn_cast<BlockExpr>(e); 16232 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 16233 return nullptr; 16234 16235 FindCaptureVisitor visitor(S.Context, owner.Variable); 16236 visitor.Visit(block->getBlockDecl()->getBody()); 16237 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 16238 } 16239 16240 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 16241 RetainCycleOwner &owner) { 16242 assert(capturer); 16243 assert(owner.Variable && owner.Loc.isValid()); 16244 16245 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 16246 << owner.Variable << capturer->getSourceRange(); 16247 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 16248 << owner.Indirect << owner.Range; 16249 } 16250 16251 /// Check for a keyword selector that starts with the word 'add' or 16252 /// 'set'. 16253 static bool isSetterLikeSelector(Selector sel) { 16254 if (sel.isUnarySelector()) return false; 16255 16256 StringRef str = sel.getNameForSlot(0); 16257 while (!str.empty() && str.front() == '_') str = str.substr(1); 16258 if (str.startswith("set")) 16259 str = str.substr(3); 16260 else if (str.startswith("add")) { 16261 // Specially allow 'addOperationWithBlock:'. 16262 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 16263 return false; 16264 str = str.substr(3); 16265 } 16266 else 16267 return false; 16268 16269 if (str.empty()) return true; 16270 return !isLowercase(str.front()); 16271 } 16272 16273 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 16274 ObjCMessageExpr *Message) { 16275 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 16276 Message->getReceiverInterface(), 16277 NSAPI::ClassId_NSMutableArray); 16278 if (!IsMutableArray) { 16279 return None; 16280 } 16281 16282 Selector Sel = Message->getSelector(); 16283 16284 Optional<NSAPI::NSArrayMethodKind> MKOpt = 16285 S.NSAPIObj->getNSArrayMethodKind(Sel); 16286 if (!MKOpt) { 16287 return None; 16288 } 16289 16290 NSAPI::NSArrayMethodKind MK = *MKOpt; 16291 16292 switch (MK) { 16293 case NSAPI::NSMutableArr_addObject: 16294 case NSAPI::NSMutableArr_insertObjectAtIndex: 16295 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 16296 return 0; 16297 case NSAPI::NSMutableArr_replaceObjectAtIndex: 16298 return 1; 16299 16300 
default: 16301 return None; 16302 } 16303 16304 return None; 16305 } 16306 16307 static 16308 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 16309 ObjCMessageExpr *Message) { 16310 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 16311 Message->getReceiverInterface(), 16312 NSAPI::ClassId_NSMutableDictionary); 16313 if (!IsMutableDictionary) { 16314 return None; 16315 } 16316 16317 Selector Sel = Message->getSelector(); 16318 16319 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 16320 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 16321 if (!MKOpt) { 16322 return None; 16323 } 16324 16325 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 16326 16327 switch (MK) { 16328 case NSAPI::NSMutableDict_setObjectForKey: 16329 case NSAPI::NSMutableDict_setValueForKey: 16330 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 16331 return 0; 16332 16333 default: 16334 return None; 16335 } 16336 16337 return None; 16338 } 16339 16340 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 16341 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 16342 Message->getReceiverInterface(), 16343 NSAPI::ClassId_NSMutableSet); 16344 16345 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 16346 Message->getReceiverInterface(), 16347 NSAPI::ClassId_NSMutableOrderedSet); 16348 if (!IsMutableSet && !IsMutableOrderedSet) { 16349 return None; 16350 } 16351 16352 Selector Sel = Message->getSelector(); 16353 16354 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 16355 if (!MKOpt) { 16356 return None; 16357 } 16358 16359 NSAPI::NSSetMethodKind MK = *MKOpt; 16360 16361 switch (MK) { 16362 case NSAPI::NSMutableSet_addObject: 16363 case NSAPI::NSOrderedSet_setObjectAtIndex: 16364 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 16365 case NSAPI::NSOrderedSet_insertObjectAtIndex: 16366 return 0; 16367 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 16368 return 1; 16369 } 16370 16371 return None; 16372 } 
/// Diagnose sends that would insert a container into itself
/// (-Wobjc-circular-container), e.g. [arr addObject:arr], which is a
/// runtime hazard for NSMutableArray/NSMutableDictionary/NSMutableSet
/// and NSMutableOrderedSet.
///
/// The Get*ArgumentIndex helpers above report which argument of the
/// message is the inserted object for the recognized mutating selectors.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  // Only instance messages can mutate a container.
  if (!Message->isInstanceMessage()) {
    return;
  }

  // Index of the inserted-object argument, if this is a recognized
  // mutating selector on a recognized mutable container class.
  Optional<int> ArgOpt;

  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  // Look through implicit casts and opaque values (e.g. from pseudo-object
  // rewrites of subscript syntax) to the underlying argument expression.
  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super addObject:self] inserts the object into itself via the
    // inherited implementation.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    // Normal instance receiver: strip casts/opaque values the same way as
    // for the argument so the two can be compared structurally.
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    // Warn when receiver and argument name the same variable...
    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          // 'self' has no interesting declaration site to point at.
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      // ...or the same instance variable.
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}

/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    // A super send strongly involves 'self'; use the 'super' keyword's
    // location for the note since there is no receiver expression.
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}

/// Check a property assign to see if it's likely to cause a retain cycle.
16470 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16471 RetainCycleOwner owner; 16472 if (!findRetainCycleOwner(*this, receiver, owner)) 16473 return; 16474 16475 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16476 diagnoseRetainCycle(*this, capturer, owner); 16477 } 16478 16479 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16480 RetainCycleOwner Owner; 16481 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16482 return; 16483 16484 // Because we don't have an expression for the variable, we have to set the 16485 // location explicitly here. 16486 Owner.Loc = Var->getLocation(); 16487 Owner.Range = Var->getSourceRange(); 16488 16489 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16490 diagnoseRetainCycle(*this, Capturer, Owner); 16491 } 16492 16493 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16494 Expr *RHS, bool isProperty) { 16495 // Check if RHS is an Objective-C object literal, which also can get 16496 // immediately zapped in a weak reference. Note that we explicitly 16497 // allow ObjCStringLiterals, since those are designed to never really die. 16498 RHS = RHS->IgnoreParenImpCasts(); 16499 16500 // This enum needs to match with the 'select' in 16501 // warn_objc_arc_literal_assign (off-by-1). 16502 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16503 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16504 return false; 16505 16506 S.Diag(Loc, diag::warn_arc_literal_assign) 16507 << (unsigned) Kind 16508 << (isProperty ? 0 : 1) 16509 << RHS->getSourceRange(); 16510 16511 return true; 16512 } 16513 16514 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16515 Qualifiers::ObjCLifetime LT, 16516 Expr *RHS, bool isProperty) { 16517 // Strip off any implicit cast added to get to the one ARC-specific. 
16518 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16519 if (cast->getCastKind() == CK_ARCConsumeObject) { 16520 S.Diag(Loc, diag::warn_arc_retained_assign) 16521 << (LT == Qualifiers::OCL_ExplicitNone) 16522 << (isProperty ? 0 : 1) 16523 << RHS->getSourceRange(); 16524 return true; 16525 } 16526 RHS = cast->getSubExpr(); 16527 } 16528 16529 if (LT == Qualifiers::OCL_Weak && 16530 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16531 return true; 16532 16533 return false; 16534 } 16535 16536 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16537 QualType LHS, Expr *RHS) { 16538 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16539 16540 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16541 return false; 16542 16543 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16544 return true; 16545 16546 return false; 16547 } 16548 16549 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16550 Expr *LHS, Expr *RHS) { 16551 QualType LHSType; 16552 // PropertyRef on LHS type need be directly obtained from 16553 // its declaration as it has a PseudoType. 16554 ObjCPropertyRefExpr *PRE 16555 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16556 if (PRE && !PRE->isImplicitProperty()) { 16557 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16558 if (PD) 16559 LHSType = PD->getType(); 16560 } 16561 16562 if (LHSType.isNull()) 16563 LHSType = LHS->getType(); 16564 16565 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16566 16567 if (LT == Qualifiers::OCL_Weak) { 16568 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16569 getCurFunction()->markSafeWeakUse(LHS); 16570 } 16571 16572 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16573 return; 16574 16575 // FIXME. Check for other life times. 
16576 if (LT != Qualifiers::OCL_None) 16577 return; 16578 16579 if (PRE) { 16580 if (PRE->isImplicitProperty()) 16581 return; 16582 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16583 if (!PD) 16584 return; 16585 16586 unsigned Attributes = PD->getPropertyAttributes(); 16587 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16588 // when 'assign' attribute was not explicitly specified 16589 // by user, ignore it and rely on property type itself 16590 // for lifetime info. 16591 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16592 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16593 LHSType->isObjCRetainableType()) 16594 return; 16595 16596 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16597 if (cast->getCastKind() == CK_ARCConsumeObject) { 16598 Diag(Loc, diag::warn_arc_retained_property_assign) 16599 << RHS->getSourceRange(); 16600 return; 16601 } 16602 RHS = cast->getSubExpr(); 16603 } 16604 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16605 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16606 return; 16607 } 16608 } 16609 } 16610 16611 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16612 16613 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16614 SourceLocation StmtLoc, 16615 const NullStmt *Body) { 16616 // Do not warn if the body is a macro that expands to nothing, e.g: 16617 // 16618 // #define CALL(x) 16619 // if (condition) 16620 // CALL(0); 16621 if (Body->hasLeadingEmptyMacro()) 16622 return false; 16623 16624 // Get line numbers of statement and body. 
16625 bool StmtLineInvalid; 16626 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16627 &StmtLineInvalid); 16628 if (StmtLineInvalid) 16629 return false; 16630 16631 bool BodyLineInvalid; 16632 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16633 &BodyLineInvalid); 16634 if (BodyLineInvalid) 16635 return false; 16636 16637 // Warn if null statement and body are on the same line. 16638 if (StmtLine != BodyLine) 16639 return false; 16640 16641 return true; 16642 } 16643 16644 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16645 const Stmt *Body, 16646 unsigned DiagID) { 16647 // Since this is a syntactic check, don't emit diagnostic for template 16648 // instantiations, this just adds noise. 16649 if (CurrentInstantiationScope) 16650 return; 16651 16652 // The body should be a null statement. 16653 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16654 if (!NBody) 16655 return; 16656 16657 // Do the usual checks. 16658 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16659 return; 16660 16661 Diag(NBody->getSemiLoc(), DiagID); 16662 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16663 } 16664 16665 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16666 const Stmt *PossibleBody) { 16667 assert(!CurrentInstantiationScope); // Ensured by caller 16668 16669 SourceLocation StmtLoc; 16670 const Stmt *Body; 16671 unsigned DiagID; 16672 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16673 StmtLoc = FS->getRParenLoc(); 16674 Body = FS->getBody(); 16675 DiagID = diag::warn_empty_for_body; 16676 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16677 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 16678 Body = WS->getBody(); 16679 DiagID = diag::warn_empty_while_body; 16680 } else 16681 return; // Neither `for' nor `while'. 16682 16683 // The body should be a null statement. 
16684 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16685 if (!NBody) 16686 return; 16687 16688 // Skip expensive checks if diagnostic is disabled. 16689 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16690 return; 16691 16692 // Do the usual checks. 16693 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16694 return; 16695 16696 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16697 // noise level low, emit diagnostics only if for/while is followed by a 16698 // CompoundStmt, e.g.: 16699 // for (int i = 0; i < n; i++); 16700 // { 16701 // a(i); 16702 // } 16703 // or if for/while is followed by a statement with more indentation 16704 // than for/while itself: 16705 // for (int i = 0; i < n; i++); 16706 // a(i); 16707 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16708 if (!ProbableTypo) { 16709 bool BodyColInvalid; 16710 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16711 PossibleBody->getBeginLoc(), &BodyColInvalid); 16712 if (BodyColInvalid) 16713 return; 16714 16715 bool StmtColInvalid; 16716 unsigned StmtCol = 16717 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16718 if (StmtColInvalid) 16719 return; 16720 16721 if (BodyCol > StmtCol) 16722 ProbableTypo = true; 16723 } 16724 16725 if (ProbableTypo) { 16726 Diag(NBody->getSemiLoc(), DiagID); 16727 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16728 } 16729 } 16730 16731 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16732 16733 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 16734 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 16735 SourceLocation OpLoc) { 16736 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 16737 return; 16738 16739 if (inTemplateInstantiation()) 16740 return; 16741 16742 // Strip parens and casts away. 
16743 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 16744 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 16745 16746 // Check for a call expression 16747 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 16748 if (!CE || CE->getNumArgs() != 1) 16749 return; 16750 16751 // Check for a call to std::move 16752 if (!CE->isCallToStdMove()) 16753 return; 16754 16755 // Get argument from std::move 16756 RHSExpr = CE->getArg(0); 16757 16758 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 16759 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 16760 16761 // Two DeclRefExpr's, check that the decls are the same. 16762 if (LHSDeclRef && RHSDeclRef) { 16763 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16764 return; 16765 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16766 RHSDeclRef->getDecl()->getCanonicalDecl()) 16767 return; 16768 16769 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16770 << LHSExpr->getSourceRange() 16771 << RHSExpr->getSourceRange(); 16772 return; 16773 } 16774 16775 // Member variables require a different approach to check for self moves. 16776 // MemberExpr's are the same if every nested MemberExpr refers to the same 16777 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 16778 // the base Expr's are CXXThisExpr's. 
16779 const Expr *LHSBase = LHSExpr; 16780 const Expr *RHSBase = RHSExpr; 16781 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 16782 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 16783 if (!LHSME || !RHSME) 16784 return; 16785 16786 while (LHSME && RHSME) { 16787 if (LHSME->getMemberDecl()->getCanonicalDecl() != 16788 RHSME->getMemberDecl()->getCanonicalDecl()) 16789 return; 16790 16791 LHSBase = LHSME->getBase(); 16792 RHSBase = RHSME->getBase(); 16793 LHSME = dyn_cast<MemberExpr>(LHSBase); 16794 RHSME = dyn_cast<MemberExpr>(RHSBase); 16795 } 16796 16797 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16798 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16799 if (LHSDeclRef && RHSDeclRef) { 16800 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16801 return; 16802 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16803 RHSDeclRef->getDecl()->getCanonicalDecl()) 16804 return; 16805 16806 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16807 << LHSExpr->getSourceRange() 16808 << RHSExpr->getSourceRange(); 16809 return; 16810 } 16811 16812 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16813 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16814 << LHSExpr->getSourceRange() 16815 << RHSExpr->getSourceRange(); 16816 } 16817 16818 //===--- Layout compatibility ----------------------------------------------// 16819 16820 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16821 16822 /// Check if two enumeration types are layout-compatible. 16823 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16824 // C++11 [dcl.enum] p8: 16825 // Two enumeration types are layout-compatible if they have the same 16826 // underlying type. 16827 return ED1->isComplete() && ED2->isComplete() && 16828 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16829 } 16830 16831 /// Check if two fields are layout-compatible. 
16832 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16833 FieldDecl *Field2) { 16834 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16835 return false; 16836 16837 if (Field1->isBitField() != Field2->isBitField()) 16838 return false; 16839 16840 if (Field1->isBitField()) { 16841 // Make sure that the bit-fields are the same length. 16842 unsigned Bits1 = Field1->getBitWidthValue(C); 16843 unsigned Bits2 = Field2->getBitWidthValue(C); 16844 16845 if (Bits1 != Bits2) 16846 return false; 16847 } 16848 16849 return true; 16850 } 16851 16852 /// Check if two standard-layout structs are layout-compatible. 16853 /// (C++11 [class.mem] p17) 16854 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16855 RecordDecl *RD2) { 16856 // If both records are C++ classes, check that base classes match. 16857 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16858 // If one of records is a CXXRecordDecl we are in C++ mode, 16859 // thus the other one is a CXXRecordDecl, too. 16860 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16861 // Check number of base classes. 16862 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16863 return false; 16864 16865 // Check the base classes. 16866 for (CXXRecordDecl::base_class_const_iterator 16867 Base1 = D1CXX->bases_begin(), 16868 BaseEnd1 = D1CXX->bases_end(), 16869 Base2 = D2CXX->bases_begin(); 16870 Base1 != BaseEnd1; 16871 ++Base1, ++Base2) { 16872 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16873 return false; 16874 } 16875 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16876 // If only RD2 is a C++ class, it should have zero base classes. 16877 if (D2CXX->getNumBases() > 0) 16878 return false; 16879 } 16880 16881 // Check the fields. 
16882 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16883 Field2End = RD2->field_end(), 16884 Field1 = RD1->field_begin(), 16885 Field1End = RD1->field_end(); 16886 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16887 if (!isLayoutCompatible(C, *Field1, *Field2)) 16888 return false; 16889 } 16890 if (Field1 != Field1End || Field2 != Field2End) 16891 return false; 16892 16893 return true; 16894 } 16895 16896 /// Check if two standard-layout unions are layout-compatible. 16897 /// (C++11 [class.mem] p18) 16898 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16899 RecordDecl *RD2) { 16900 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16901 for (auto *Field2 : RD2->fields()) 16902 UnmatchedFields.insert(Field2); 16903 16904 for (auto *Field1 : RD1->fields()) { 16905 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16906 I = UnmatchedFields.begin(), 16907 E = UnmatchedFields.end(); 16908 16909 for ( ; I != E; ++I) { 16910 if (isLayoutCompatible(C, Field1, *I)) { 16911 bool Result = UnmatchedFields.erase(*I); 16912 (void) Result; 16913 assert(Result); 16914 break; 16915 } 16916 } 16917 if (I == E) 16918 return false; 16919 } 16920 16921 return UnmatchedFields.empty(); 16922 } 16923 16924 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16925 RecordDecl *RD2) { 16926 if (RD1->isUnion() != RD2->isUnion()) 16927 return false; 16928 16929 if (RD1->isUnion()) 16930 return isLayoutCompatibleUnion(C, RD1, RD2); 16931 else 16932 return isLayoutCompatibleStruct(C, RD1, RD2); 16933 } 16934 16935 /// Check if two types are layout-compatible in C++11 sense. 16936 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16937 if (T1.isNull() || T2.isNull()) 16938 return false; 16939 16940 // C++11 [basic.types] p11: 16941 // If two types T1 and T2 are the same type, then T1 and T2 are 16942 // layout-compatible types. 
16943 if (C.hasSameType(T1, T2)) 16944 return true; 16945 16946 T1 = T1.getCanonicalType().getUnqualifiedType(); 16947 T2 = T2.getCanonicalType().getUnqualifiedType(); 16948 16949 const Type::TypeClass TC1 = T1->getTypeClass(); 16950 const Type::TypeClass TC2 = T2->getTypeClass(); 16951 16952 if (TC1 != TC2) 16953 return false; 16954 16955 if (TC1 == Type::Enum) { 16956 return isLayoutCompatible(C, 16957 cast<EnumType>(T1)->getDecl(), 16958 cast<EnumType>(T2)->getDecl()); 16959 } else if (TC1 == Type::Record) { 16960 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16961 return false; 16962 16963 return isLayoutCompatible(C, 16964 cast<RecordType>(T1)->getDecl(), 16965 cast<RecordType>(T2)->getDecl()); 16966 } 16967 16968 return false; 16969 } 16970 16971 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16972 16973 /// Given a type tag expression find the type tag itself. 16974 /// 16975 /// \param TypeExpr Type tag expression, as it appears in user's code. 16976 /// 16977 /// \param VD Declaration of an identifier that appears in a type tag. 16978 /// 16979 /// \param MagicValue Type tag magic value. 16980 /// 16981 /// \param isConstantEvaluated whether the evalaution should be performed in 16982 16983 /// constant context. 
16984 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16985 const ValueDecl **VD, uint64_t *MagicValue, 16986 bool isConstantEvaluated) { 16987 while(true) { 16988 if (!TypeExpr) 16989 return false; 16990 16991 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16992 16993 switch (TypeExpr->getStmtClass()) { 16994 case Stmt::UnaryOperatorClass: { 16995 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16996 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16997 TypeExpr = UO->getSubExpr(); 16998 continue; 16999 } 17000 return false; 17001 } 17002 17003 case Stmt::DeclRefExprClass: { 17004 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 17005 *VD = DRE->getDecl(); 17006 return true; 17007 } 17008 17009 case Stmt::IntegerLiteralClass: { 17010 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 17011 llvm::APInt MagicValueAPInt = IL->getValue(); 17012 if (MagicValueAPInt.getActiveBits() <= 64) { 17013 *MagicValue = MagicValueAPInt.getZExtValue(); 17014 return true; 17015 } else 17016 return false; 17017 } 17018 17019 case Stmt::BinaryConditionalOperatorClass: 17020 case Stmt::ConditionalOperatorClass: { 17021 const AbstractConditionalOperator *ACO = 17022 cast<AbstractConditionalOperator>(TypeExpr); 17023 bool Result; 17024 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 17025 isConstantEvaluated)) { 17026 if (Result) 17027 TypeExpr = ACO->getTrueExpr(); 17028 else 17029 TypeExpr = ACO->getFalseExpr(); 17030 continue; 17031 } 17032 return false; 17033 } 17034 17035 case Stmt::BinaryOperatorClass: { 17036 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 17037 if (BO->getOpcode() == BO_Comma) { 17038 TypeExpr = BO->getRHS(); 17039 continue; 17040 } 17041 return false; 17042 } 17043 17044 default: 17045 return false; 17046 } 17047 } 17048 } 17049 17050 /// Retrieve the C type corresponding to type tag TypeExpr. 
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    // The tag expression referenced a declaration: the attribute on that
    // declaration is authoritative, and registered magic values are ignored.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // Otherwise fall back to the magic values registered via
  // Sema::RegisterTypeTagForDatatype, keyed by (kind, value).
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

/// Record that integer constant \p MagicValue used with tags of kind
/// \p ArgumentKind corresponds to the C type \p Type, for later lookup by
/// GetMatchingCType. The underlying map is allocated lazily on first use;
/// re-registering the same (kind, value) pair overwrites the previous entry.
void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

/// Returns true if \p T1 and \p T2 are the "same" character type when plain
/// `char' is treated as equivalent to `signed char' / `unsigned char'
/// according to the target's char signedness (Char_S vs Char_U).
static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

/// Check a call against one argument_with_type_tag attribute: verify that the
/// argument at the attribute's arg_idx matches the C type implied by the type
/// tag at type_tag_idx, emitting type-safety warnings on mismatch.
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    // %select index 0: the out-of-range index is the type tag's.
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    // %select index 1: the out-of-range index is the checked argument's.
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    // Strict mode: the argument must have exactly the required type.
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    // Relaxed mode: layout compatibility with the required type suffices.
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

/// Record a member access \p E of member \p MD of record \p RD whose
/// effective alignment \p Alignment is reduced (e.g. by packing), so it can
/// be diagnosed or discarded later.
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

/// Emit -Waddress-of-packed-member warnings for all recorded misaligned
/// member accesses, then clear the list. For an unnamed record, the typedef
/// name for the anonymous declaration is used in the diagnostic if available.
void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

/// Drop a pending misaligned-member record when the address-of expression
/// \p E is converted to type \p T in a way that makes the misalignment
/// harmless: conversion to an integer type, or to a pointer whose pointee is
/// incomplete or requires no more alignment than the member actually has.
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

/// Walk the chain of MemberExprs rooted at \p E; if some packed attribute in
/// the chain reduces the effective alignment below what the expression's type
/// expects, invoke \p Action with the culprit field and its actual alignment.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

/// Entry point for -Waddress-of-packed-member: records any reduced-alignment
/// member referenced by \p rhs via AddPotentialMisalignedMembers.
void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

// Check if \p Ty is a valid type for the elementwise math builtins. If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    S.Diag(Loc, diag::err_builtin_invalid_arg_type)
        << 1 << /* vector, integer or float ty*/ 0 << Ty;
    return true;
  }
  return false;
}

/// Type-check a one-argument elementwise math builtin call: applies the usual
/// unary conversions to the argument, validates its element type, and sets the
/// call's result type to the converted argument type. Returns true on error.
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}

/// Type-check a two-argument elementwise math builtin call: both arguments
/// must convert to the same type, which becomes the call's result type.
/// Returns true on error.
bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}

/// Type-check a one-argument reduction builtin call: applies the usual unary
/// conversions to the argument and updates the call. Returns true on error.
/// Note: unlike the elementwise variant, the result type is left unchanged
/// and the element type is not validated here.
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}

/// Type-check __builtin_matrix_transpose: the argument must be a constant
/// matrix; the result type is the argument's matrix type with rows and
/// columns swapped.
ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty*/ 1 << Matrix->getType();
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}

// Get and verify the matrix dimensions.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  Optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

/// Type-check __builtin_matrix_column_major_load(ptr, rows, columns, stride):
/// validates the element pointer, the constant row/column dimensions and the
/// stride (which must be >= rows when constant), and sets the result type to
/// the corresponding constant matrix type.
ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  // Diagnostics for individual arguments are accumulated so that all bad
  // arguments are reported in one pass before giving up.
  bool ArgError = false;

  // Check pointer argument.
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  // A constant stride smaller than the row count cannot address a full
  // column, so reject it; non-constant strides are checked at runtime only.
  if (MaybeRows) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

/// Type-check __builtin_matrix_column_major_store(matrix, ptr, stride):
/// validates the matrix argument, the (non-const) destination element
/// pointer whose pointee must match the matrix element type, and the stride
/// (which must be >= the matrix row count when constant).
ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  // Diagnostics for individual arguments are accumulated so that all bad
  // arguments are reported in one pass before giving up.
  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    // The builtin writes through the pointer, so a const pointee is an error.
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
                               const NamedDecl *Callee) {
  const NamedDecl *Caller = getCurFunctionOrMethodDecl();

  // Only calls made from inside a function marked enforce_tcb are checked.
  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
    CalleeTCBs.insert(A->getTCBName());
  for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
    CalleeTCBs.insert(A->getTCBName());

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
    StringRef CallerTCB = A->getTCBName();
    if (CalleeTCBs.count(CallerTCB) == 0) {
      this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
          << Callee << CallerTCB;
    }
  }
}