//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
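  // (Illustrative sketch, not part of the original source: a call such as
  //  __builtin_annotation(SomeInt, "reason") satisfies both checks, while a
  //  wide literal like L"reason" is rejected just below.)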
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is type dependent.
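  // (Illustrative, not part of the original source: __builtin_align_up(p, 16)
  //  passes these checks, __builtin_align_up(p, 3) hits the power-of-two
  //  diagnostic, and __builtin_is_aligned(p, 1) draws the "useless" warning.)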
  if (!AlignOp->isInstantiationDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << MaxValue.toString(10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = TheCall->getArg(I);
    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
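  // (Illustrative, not part of the original source: in
  //  __builtin_add_overflow(a, b, &res) the third argument must be a pointer
  //  to a modifiable integer; passing a 'const int *' is rejected here.)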
  {
    ExprResult Arg = TheCall->getArg(2);
    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
          !PtrTy->getPointeeType().isConstQualified())) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        S.getASTContext(), Ty, /*consume*/ false);
    Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());
  }
  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in the
/// source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Evaluate strlen of strcpy arguments, use as object size.
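  //
  // (Illustrative, not part of the original source: given `char Buf[4];`, a
  //  call like `memcpy(Buf, Src, 8)` is diagnosed below because the constant
  //  size 8 exceeds what __builtin_object_size reports for Buf.)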

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  Optional<llvm::APSInt> UsedSize;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                       .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          IsChkVariant = true;
          ObjectIndex = 2;
        } else {
          IsChkVariant = false;
          ObjectIndex = 0;
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
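    // (Illustrative, not part of the original source: `strncpy(Dst, Src,
    //  sizeof(Src))` with Dst smaller than Src triggers the size-mismatch
    //  warning selected below.)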
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the caller
  // (usually using __builtin_object_size). Use that value to check this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  if (!UsedSize) {
    Expr::EvalResult Result;
    Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
    if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
  }

  if (UsedSize.getValue().ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.getValue().toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
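/// (Illustrative, not part of the original source: a call such as
/// `get_kernel_work_group_size(^(local void *p){ /* ... */ })` is accepted,
/// while blocks whose parameters are not 'local void*' are rejected by
/// checkOpenCLBlockArgs below.)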
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() >
        0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier of the declaration, if any.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or its access qualifier
/// is incompatible with the called pipe builtin.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if the pipe element type is different from the pointee type
/// of the pointer argument.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
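  // (Illustrative, not part of the original source: for `pipe int p;`, a call
  //  `read_pipe(p, &i)` with an 'int *' matches the packet type, while a
  //  'float *' argument triggers the diagnostic below.)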
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtins .def file, int is used
  // as the return type there and must be overridden here.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
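// (Illustrative, not part of the original source: `to_global(p)` with a
//  generic-address-space 'int *p' is accepted and the call is retyped as
//  '__global int *'; a pointer into the constant address space is rejected.)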
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (Call->getNumArgs() != 1) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(
      S.Context.getQualifiedType(RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1529 case Builtin::BI__builtin_prefetch: 1530 if (SemaBuiltinPrefetch(TheCall)) 1531 return ExprError(); 1532 break; 1533 case Builtin::BI__builtin_alloca_with_align: 1534 if (SemaBuiltinAllocaWithAlign(TheCall)) 1535 return ExprError(); 1536 LLVM_FALLTHROUGH; 1537 case Builtin::BI__builtin_alloca: 1538 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1539 << TheCall->getDirectCallee(); 1540 break; 1541 case Builtin::BI__assume: 1542 case Builtin::BI__builtin_assume: 1543 if (SemaBuiltinAssume(TheCall)) 1544 return ExprError(); 1545 break; 1546 case Builtin::BI__builtin_assume_aligned: 1547 if (SemaBuiltinAssumeAligned(TheCall)) 1548 return ExprError(); 1549 break; 1550 case Builtin::BI__builtin_dynamic_object_size: 1551 case Builtin::BI__builtin_object_size: 1552 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1553 return ExprError(); 1554 break; 1555 case Builtin::BI__builtin_longjmp: 1556 if (SemaBuiltinLongjmp(TheCall)) 1557 return ExprError(); 1558 break; 1559 case Builtin::BI__builtin_setjmp: 1560 if (SemaBuiltinSetjmp(TheCall)) 1561 return ExprError(); 1562 break; 1563 case Builtin::BI_setjmp: 1564 case Builtin::BI_setjmpex: 1565 if (checkArgCount(*this, TheCall, 1)) 1566 return true; 1567 break; 1568 case Builtin::BI__builtin_classify_type: 1569 if (checkArgCount(*this, TheCall, 1)) return true; 1570 TheCall->setType(Context.IntTy); 1571 break; 1572 case Builtin::BI__builtin_constant_p: { 1573 if (checkArgCount(*this, TheCall, 1)) return true; 1574 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1575 if (Arg.isInvalid()) return true; 1576 TheCall->setArg(0, Arg.get()); 1577 TheCall->setType(Context.IntTy); 1578 break; 1579 } 1580 case Builtin::BI__builtin_launder: 1581 return SemaBuiltinLaunder(*this, TheCall); 1582 case Builtin::BI__sync_fetch_and_add: 1583 case Builtin::BI__sync_fetch_and_add_1: 1584 case Builtin::BI__sync_fetch_and_add_2: 1585 case Builtin::BI__sync_fetch_and_add_4: 1586 case Builtin::BI__sync_fetch_and_add_8: 1587 case Builtin::BI__sync_fetch_and_add_16: 1588 case Builtin::BI__sync_fetch_and_sub: 1589 case Builtin::BI__sync_fetch_and_sub_1: 1590 case Builtin::BI__sync_fetch_and_sub_2: 1591 case Builtin::BI__sync_fetch_and_sub_4: 1592 case Builtin::BI__sync_fetch_and_sub_8: 1593 case Builtin::BI__sync_fetch_and_sub_16: 1594 case Builtin::BI__sync_fetch_and_or: 1595 case Builtin::BI__sync_fetch_and_or_1: 1596 case Builtin::BI__sync_fetch_and_or_2: 1597 case Builtin::BI__sync_fetch_and_or_4: 1598 case Builtin::BI__sync_fetch_and_or_8: 1599 case Builtin::BI__sync_fetch_and_or_16: 1600 case Builtin::BI__sync_fetch_and_and: 1601 case Builtin::BI__sync_fetch_and_and_1: 1602 case Builtin::BI__sync_fetch_and_and_2: 1603 case Builtin::BI__sync_fetch_and_and_4: 1604 case Builtin::BI__sync_fetch_and_and_8: 1605 case Builtin::BI__sync_fetch_and_and_16: 1606 case Builtin::BI__sync_fetch_and_xor: 1607 case Builtin::BI__sync_fetch_and_xor_1: 1608 case Builtin::BI__sync_fetch_and_xor_2: 1609 case Builtin::BI__sync_fetch_and_xor_4: 1610 case Builtin::BI__sync_fetch_and_xor_8: 1611 case Builtin::BI__sync_fetch_and_xor_16: 1612 case Builtin::BI__sync_fetch_and_nand: 1613 case Builtin::BI__sync_fetch_and_nand_1: 1614 case Builtin::BI__sync_fetch_and_nand_2: 1615 case Builtin::BI__sync_fetch_and_nand_4: 1616 case Builtin::BI__sync_fetch_and_nand_8: 1617 case Builtin::BI__sync_fetch_and_nand_16: 1618 case Builtin::BI__sync_add_and_fetch: 1619 case Builtin::BI__sync_add_and_fetch_1: 1620 case Builtin::BI__sync_add_and_fetch_2: 1621 case 
Builtin::BI__sync_add_and_fetch_4: 1622 case Builtin::BI__sync_add_and_fetch_8: 1623 case Builtin::BI__sync_add_and_fetch_16: 1624 case Builtin::BI__sync_sub_and_fetch: 1625 case Builtin::BI__sync_sub_and_fetch_1: 1626 case Builtin::BI__sync_sub_and_fetch_2: 1627 case Builtin::BI__sync_sub_and_fetch_4: 1628 case Builtin::BI__sync_sub_and_fetch_8: 1629 case Builtin::BI__sync_sub_and_fetch_16: 1630 case Builtin::BI__sync_and_and_fetch: 1631 case Builtin::BI__sync_and_and_fetch_1: 1632 case Builtin::BI__sync_and_and_fetch_2: 1633 case Builtin::BI__sync_and_and_fetch_4: 1634 case Builtin::BI__sync_and_and_fetch_8: 1635 case Builtin::BI__sync_and_and_fetch_16: 1636 case Builtin::BI__sync_or_and_fetch: 1637 case Builtin::BI__sync_or_and_fetch_1: 1638 case Builtin::BI__sync_or_and_fetch_2: 1639 case Builtin::BI__sync_or_and_fetch_4: 1640 case Builtin::BI__sync_or_and_fetch_8: 1641 case Builtin::BI__sync_or_and_fetch_16: 1642 case Builtin::BI__sync_xor_and_fetch: 1643 case Builtin::BI__sync_xor_and_fetch_1: 1644 case Builtin::BI__sync_xor_and_fetch_2: 1645 case Builtin::BI__sync_xor_and_fetch_4: 1646 case Builtin::BI__sync_xor_and_fetch_8: 1647 case Builtin::BI__sync_xor_and_fetch_16: 1648 case Builtin::BI__sync_nand_and_fetch: 1649 case Builtin::BI__sync_nand_and_fetch_1: 1650 case Builtin::BI__sync_nand_and_fetch_2: 1651 case Builtin::BI__sync_nand_and_fetch_4: 1652 case Builtin::BI__sync_nand_and_fetch_8: 1653 case Builtin::BI__sync_nand_and_fetch_16: 1654 case Builtin::BI__sync_val_compare_and_swap: 1655 case Builtin::BI__sync_val_compare_and_swap_1: 1656 case Builtin::BI__sync_val_compare_and_swap_2: 1657 case Builtin::BI__sync_val_compare_and_swap_4: 1658 case Builtin::BI__sync_val_compare_and_swap_8: 1659 case Builtin::BI__sync_val_compare_and_swap_16: 1660 case Builtin::BI__sync_bool_compare_and_swap: 1661 case Builtin::BI__sync_bool_compare_and_swap_1: 1662 case Builtin::BI__sync_bool_compare_and_swap_2: 1663 case Builtin::BI__sync_bool_compare_and_swap_4: 1664 case Builtin::BI__sync_bool_compare_and_swap_8: 1665 case Builtin::BI__sync_bool_compare_and_swap_16: 1666 case Builtin::BI__sync_lock_test_and_set: 1667 case Builtin::BI__sync_lock_test_and_set_1: 1668 case Builtin::BI__sync_lock_test_and_set_2: 1669 case Builtin::BI__sync_lock_test_and_set_4: 1670 case Builtin::BI__sync_lock_test_and_set_8: 1671 case Builtin::BI__sync_lock_test_and_set_16: 1672 case Builtin::BI__sync_lock_release: 1673 case Builtin::BI__sync_lock_release_1: 1674 case Builtin::BI__sync_lock_release_2: 1675 case Builtin::BI__sync_lock_release_4: 1676 case Builtin::BI__sync_lock_release_8: 1677 case Builtin::BI__sync_lock_release_16: 1678 case Builtin::BI__sync_swap: 1679 case Builtin::BI__sync_swap_1: 1680 case Builtin::BI__sync_swap_2: 1681 case Builtin::BI__sync_swap_4: 1682 case Builtin::BI__sync_swap_8: 1683 case Builtin::BI__sync_swap_16: 1684 return SemaBuiltinAtomicOverloaded(TheCallResult); 1685 case Builtin::BI__sync_synchronize: 1686 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1687 << TheCall->getCallee()->getSourceRange(); 1688 break; 1689 case Builtin::BI__builtin_nontemporal_load: 1690 case Builtin::BI__builtin_nontemporal_store: 1691 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1692 case Builtin::BI__builtin_memcpy_inline: { 1693 clang::Expr *SizeOp = TheCall->getArg(2); 1694 // We warn about copying to or from `nullptr` pointers when `size` is 1695 // greater than 0. When `size` is value dependent we cannot evaluate its 1696 // value so we bail out. 
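  // For example (with hypothetical operands), __builtin_memcpy_inline(Dst,
  // Src, 0) is not diagnosed even if Dst or Src is null, whereas a non-zero
  // constant size triggers the non-null argument checks below.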
1697 if (SizeOp->isValueDependent()) 1698 break; 1699 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1700 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1701 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1702 } 1703 break; 1704 } 1705 #define BUILTIN(ID, TYPE, ATTRS) 1706 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1707 case Builtin::BI##ID: \ 1708 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1709 #include "clang/Basic/Builtins.def" 1710 case Builtin::BI__annotation: 1711 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1712 return ExprError(); 1713 break; 1714 case Builtin::BI__builtin_annotation: 1715 if (SemaBuiltinAnnotation(*this, TheCall)) 1716 return ExprError(); 1717 break; 1718 case Builtin::BI__builtin_addressof: 1719 if (SemaBuiltinAddressof(*this, TheCall)) 1720 return ExprError(); 1721 break; 1722 case Builtin::BI__builtin_is_aligned: 1723 case Builtin::BI__builtin_align_up: 1724 case Builtin::BI__builtin_align_down: 1725 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1726 return ExprError(); 1727 break; 1728 case Builtin::BI__builtin_add_overflow: 1729 case Builtin::BI__builtin_sub_overflow: 1730 case Builtin::BI__builtin_mul_overflow: 1731 if (SemaBuiltinOverflow(*this, TheCall)) 1732 return ExprError(); 1733 break; 1734 case Builtin::BI__builtin_operator_new: 1735 case Builtin::BI__builtin_operator_delete: { 1736 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1737 ExprResult Res = 1738 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1739 if (Res.isInvalid()) 1740 CorrectDelayedTyposInExpr(TheCallResult.get()); 1741 return Res; 1742 } 1743 case Builtin::BI__builtin_dump_struct: { 1744 // We first want to ensure we are called with 2 arguments 1745 if (checkArgCount(*this, TheCall, 2)) 1746 return ExprError(); 1747 // Ensure that the first argument is of type 'struct XX *' 1748 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1749 const QualType PtrArgType = PtrArg->getType(); 1750 if (!PtrArgType->isPointerType() || 1751 !PtrArgType->getPointeeType()->isRecordType()) { 1752 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1753 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1754 << "structure pointer"; 1755 return ExprError(); 1756 } 1757 1758 // Ensure that the second argument is of type 'FunctionType' 1759 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1760 const QualType FnPtrArgType = FnPtrArg->getType(); 1761 if (!FnPtrArgType->isPointerType()) { 1762 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1763 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1764 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1765 return ExprError(); 1766 } 1767 1768 const auto *FuncType = 1769 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1770 1771 if (!FuncType) { 1772 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1773 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1774 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1775 return ExprError(); 1776 } 1777 1778 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1779 if (!FT->getNumParams()) { 1780 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1781 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1782 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1783 return ExprError(); 1784 } 
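      // The remaining checks require a printf-like callback: for example, a
      // (hypothetical) function declared as 'int DumpPrintf(const char *, ...)'
      // is accepted, while a non-variadic callback or one returning something
      // other than int is rejected below.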
1785 QualType PT = FT->getParamType(0); 1786 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1787 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1788 !PT->getPointeeType().isConstQualified()) { 1789 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1790 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1791 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1792 return ExprError(); 1793 } 1794 } 1795 1796 TheCall->setType(Context.IntTy); 1797 break; 1798 } 1799 case Builtin::BI__builtin_preserve_access_index: 1800 if (SemaBuiltinPreserveAI(*this, TheCall)) 1801 return ExprError(); 1802 break; 1803 case Builtin::BI__builtin_call_with_static_chain: 1804 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1805 return ExprError(); 1806 break; 1807 case Builtin::BI__exception_code: 1808 case Builtin::BI_exception_code: 1809 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1810 diag::err_seh___except_block)) 1811 return ExprError(); 1812 break; 1813 case Builtin::BI__exception_info: 1814 case Builtin::BI_exception_info: 1815 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1816 diag::err_seh___except_filter)) 1817 return ExprError(); 1818 break; 1819 case Builtin::BI__GetExceptionInfo: 1820 if (checkArgCount(*this, TheCall, 1)) 1821 return ExprError(); 1822 1823 if (CheckCXXThrowOperand( 1824 TheCall->getBeginLoc(), 1825 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1826 TheCall)) 1827 return ExprError(); 1828 1829 TheCall->setType(Context.VoidPtrTy); 1830 break; 1831 // OpenCL v2.0, s6.13.16 - Pipe functions 1832 case Builtin::BIread_pipe: 1833 case Builtin::BIwrite_pipe: 1834 // Since those two functions are declared with var args, we need a semantic 1835 // check for the argument. 1836 if (SemaBuiltinRWPipe(*this, TheCall)) 1837 return ExprError(); 1838 break; 1839 case Builtin::BIreserve_read_pipe: 1840 case Builtin::BIreserve_write_pipe: 1841 case Builtin::BIwork_group_reserve_read_pipe: 1842 case Builtin::BIwork_group_reserve_write_pipe: 1843 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1844 return ExprError(); 1845 break; 1846 case Builtin::BIsub_group_reserve_read_pipe: 1847 case Builtin::BIsub_group_reserve_write_pipe: 1848 if (checkOpenCLSubgroupExt(*this, TheCall) || 1849 SemaBuiltinReserveRWPipe(*this, TheCall)) 1850 return ExprError(); 1851 break; 1852 case Builtin::BIcommit_read_pipe: 1853 case Builtin::BIcommit_write_pipe: 1854 case Builtin::BIwork_group_commit_read_pipe: 1855 case Builtin::BIwork_group_commit_write_pipe: 1856 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1857 return ExprError(); 1858 break; 1859 case Builtin::BIsub_group_commit_read_pipe: 1860 case Builtin::BIsub_group_commit_write_pipe: 1861 if (checkOpenCLSubgroupExt(*this, TheCall) || 1862 SemaBuiltinCommitRWPipe(*this, TheCall)) 1863 return ExprError(); 1864 break; 1865 case Builtin::BIget_pipe_num_packets: 1866 case Builtin::BIget_pipe_max_packets: 1867 if (SemaBuiltinPipePackets(*this, TheCall)) 1868 return ExprError(); 1869 break; 1870 case Builtin::BIto_global: 1871 case Builtin::BIto_local: 1872 case Builtin::BIto_private: 1873 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1874 return ExprError(); 1875 break; 1876 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
1877 case Builtin::BIenqueue_kernel: 1878 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1879 return ExprError(); 1880 break; 1881 case Builtin::BIget_kernel_work_group_size: 1882 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1883 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1884 return ExprError(); 1885 break; 1886 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1887 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1888 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1889 return ExprError(); 1890 break; 1891 case Builtin::BI__builtin_os_log_format: 1892 Cleanup.setExprNeedsCleanups(true); 1893 LLVM_FALLTHROUGH; 1894 case Builtin::BI__builtin_os_log_format_buffer_size: 1895 if (SemaBuiltinOSLogFormat(TheCall)) 1896 return ExprError(); 1897 break; 1898 case Builtin::BI__builtin_frame_address: 1899 case Builtin::BI__builtin_return_address: 1900 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1901 return ExprError(); 1902 1903 // -Wframe-address warning if non-zero passed to builtin 1904 // return/frame address. 1905 Expr::EvalResult Result; 1906 if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1907 Result.Val.getInt() != 0) 1908 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1909 << ((BuiltinID == Builtin::BI__builtin_return_address) 1910 ? "__builtin_return_address" 1911 : "__builtin_frame_address") 1912 << TheCall->getSourceRange(); 1913 break; 1914 } 1915 1916 // Since the target specific builtins for each arch overlap, only check those 1917 // of the arch we are compiling for. 1918 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1919 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 1920 assert(Context.getAuxTargetInfo() && 1921 "Aux Target Builtin, but not an aux target?"); 1922 1923 if (CheckTSBuiltinFunctionCall( 1924 *Context.getAuxTargetInfo(), 1925 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 1926 return ExprError(); 1927 } else { 1928 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 1929 TheCall)) 1930 return ExprError(); 1931 } 1932 } 1933 1934 return TheCallResult; 1935 } 1936 1937 // Get the valid immediate range for the specified NEON type code. 1938 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 1939 NeonTypeFlags Type(t); 1940 int IsQuad = ForceQuad ? true : Type.isQuad(); 1941 switch (Type.getEltType()) { 1942 case NeonTypeFlags::Int8: 1943 case NeonTypeFlags::Poly8: 1944 return shift ? 7 : (8 << IsQuad) - 1; 1945 case NeonTypeFlags::Int16: 1946 case NeonTypeFlags::Poly16: 1947 return shift ? 15 : (4 << IsQuad) - 1; 1948 case NeonTypeFlags::Int32: 1949 return shift ? 31 : (2 << IsQuad) - 1; 1950 case NeonTypeFlags::Int64: 1951 case NeonTypeFlags::Poly64: 1952 return shift ? 63 : (1 << IsQuad) - 1; 1953 case NeonTypeFlags::Poly128: 1954 return shift ? 127 : (1 << IsQuad) - 1; 1955 case NeonTypeFlags::Float16: 1956 assert(!shift && "cannot shift float types!"); 1957 return (4 << IsQuad) - 1; 1958 case NeonTypeFlags::Float32: 1959 assert(!shift && "cannot shift float types!"); 1960 return (2 << IsQuad) - 1; 1961 case NeonTypeFlags::Float64: 1962 assert(!shift && "cannot shift float types!"); 1963 return (1 << IsQuad) - 1; 1964 } 1965 llvm_unreachable("Invalid NeonTypeFlag!"); 1966 } 1967 1968 /// getNeonEltType - Return the QualType corresponding to the elements of 1969 /// the vector type specified by the NeonTypeFlags. This is used to check 1970 /// the pointer arguments for Neon load/store intrinsics. 
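/// For example, flags describing unsigned 16-bit integer elements map to
/// Context.UnsignedShortTy, and 64-bit integer elements map to 'long' or
/// 'long long' depending on the target's int64_t type.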
1971 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 1972 bool IsPolyUnsigned, bool IsInt64Long) { 1973 switch (Flags.getEltType()) { 1974 case NeonTypeFlags::Int8: 1975 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 1976 case NeonTypeFlags::Int16: 1977 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 1978 case NeonTypeFlags::Int32: 1979 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 1980 case NeonTypeFlags::Int64: 1981 if (IsInt64Long) 1982 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 1983 else 1984 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 1985 : Context.LongLongTy; 1986 case NeonTypeFlags::Poly8: 1987 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 1988 case NeonTypeFlags::Poly16: 1989 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 1990 case NeonTypeFlags::Poly64: 1991 if (IsInt64Long) 1992 return Context.UnsignedLongTy; 1993 else 1994 return Context.UnsignedLongLongTy; 1995 case NeonTypeFlags::Poly128: 1996 break; 1997 case NeonTypeFlags::Float16: 1998 return Context.HalfTy; 1999 case NeonTypeFlags::Float32: 2000 return Context.FloatTy; 2001 case NeonTypeFlags::Float64: 2002 return Context.DoubleTy; 2003 } 2004 llvm_unreachable("Invalid NeonTypeFlag!"); 2005 } 2006 2007 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2008 // Range check SVE intrinsics that take immediate values. 2009 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2010 2011 switch (BuiltinID) { 2012 default: 2013 return false; 2014 #define GET_SVE_IMMEDIATE_CHECK 2015 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2016 #undef GET_SVE_IMMEDIATE_CHECK 2017 } 2018 2019 // Perform all the immediate checks for this builtin call. 2020 bool HasError = false; 2021 for (auto &I : ImmChecks) { 2022 int ArgNum, CheckTy, ElementSizeInBits; 2023 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2024 2025 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2026 2027 // Function that checks whether the operand (ArgNum) is an immediate 2028 // that is one of the predefined values. 2029 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2030 int ErrDiag) -> bool { 2031 // We can't check the value of a dependent argument. 2032 Expr *Arg = TheCall->getArg(ArgNum); 2033 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2034 return false; 2035 2036 // Check constant-ness first. 
2037 llvm::APSInt Imm; 2038 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2039 return true; 2040 2041 if (!CheckImm(Imm.getSExtValue())) 2042 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2043 return false; 2044 }; 2045 2046 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2047 case SVETypeFlags::ImmCheck0_31: 2048 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2049 HasError = true; 2050 break; 2051 case SVETypeFlags::ImmCheck0_13: 2052 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2053 HasError = true; 2054 break; 2055 case SVETypeFlags::ImmCheck1_16: 2056 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2057 HasError = true; 2058 break; 2059 case SVETypeFlags::ImmCheck0_7: 2060 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2061 HasError = true; 2062 break; 2063 case SVETypeFlags::ImmCheckExtract: 2064 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2065 (2048 / ElementSizeInBits) - 1)) 2066 HasError = true; 2067 break; 2068 case SVETypeFlags::ImmCheckShiftRight: 2069 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2070 HasError = true; 2071 break; 2072 case SVETypeFlags::ImmCheckShiftRightNarrow: 2073 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2074 ElementSizeInBits / 2)) 2075 HasError = true; 2076 break; 2077 case SVETypeFlags::ImmCheckShiftLeft: 2078 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2079 ElementSizeInBits - 1)) 2080 HasError = true; 2081 break; 2082 case SVETypeFlags::ImmCheckLaneIndex: 2083 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2084 (128 / (1 * ElementSizeInBits)) - 1)) 2085 HasError = true; 2086 break; 2087 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2088 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2089 (128 / (2 * ElementSizeInBits)) - 1)) 2090 HasError = true; 2091 break; 2092 case SVETypeFlags::ImmCheckLaneIndexDot: 2093 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2094 (128 / (4 * ElementSizeInBits)) - 1)) 2095 HasError = true; 2096 break; 2097 case SVETypeFlags::ImmCheckComplexRot90_270: 2098 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2099 diag::err_rotation_argument_to_cadd)) 2100 HasError = true; 2101 break; 2102 case SVETypeFlags::ImmCheckComplexRotAll90: 2103 if (CheckImmediateInSet( 2104 [](int64_t V) { 2105 return V == 0 || V == 90 || V == 180 || V == 270; 2106 }, 2107 diag::err_rotation_argument_to_cmla)) 2108 HasError = true; 2109 break; 2110 } 2111 } 2112 2113 return HasError; 2114 } 2115 2116 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2117 unsigned BuiltinID, CallExpr *TheCall) { 2118 llvm::APSInt Result; 2119 uint64_t mask = 0; 2120 unsigned TV = 0; 2121 int PtrArgNum = -1; 2122 bool HasConstPtr = false; 2123 switch (BuiltinID) { 2124 #define GET_NEON_OVERLOAD_CHECK 2125 #include "clang/Basic/arm_neon.inc" 2126 #include "clang/Basic/arm_fp16.inc" 2127 #undef GET_NEON_OVERLOAD_CHECK 2128 } 2129 2130 // For NEON intrinsics which are overloaded on vector element type, validate 2131 // the immediate which specifies which variant to emit. 
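  // The type code is the trailing constant that the arm_neon.h wrappers append
  // to the underlying builtin call, so it is always the last argument here.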
2132 unsigned ImmArg = TheCall->getNumArgs()-1; 2133 if (mask) { 2134 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2135 return true; 2136 2137 TV = Result.getLimitedValue(64); 2138 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2139 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2140 << TheCall->getArg(ImmArg)->getSourceRange(); 2141 } 2142 2143 if (PtrArgNum >= 0) { 2144 // Check that pointer arguments have the specified type. 2145 Expr *Arg = TheCall->getArg(PtrArgNum); 2146 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2147 Arg = ICE->getSubExpr(); 2148 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2149 QualType RHSTy = RHS.get()->getType(); 2150 2151 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2152 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2153 Arch == llvm::Triple::aarch64_32 || 2154 Arch == llvm::Triple::aarch64_be; 2155 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2156 QualType EltTy = 2157 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2158 if (HasConstPtr) 2159 EltTy = EltTy.withConst(); 2160 QualType LHSTy = Context.getPointerType(EltTy); 2161 AssignConvertType ConvTy; 2162 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2163 if (RHS.isInvalid()) 2164 return true; 2165 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2166 RHS.get(), AA_Assigning)) 2167 return true; 2168 } 2169 2170 // For NEON intrinsics which take an immediate value as part of the 2171 // instruction, range check them here. 2172 unsigned i = 0, l = 0, u = 0; 2173 switch (BuiltinID) { 2174 default: 2175 return false; 2176 #define GET_NEON_IMMEDIATE_CHECK 2177 #include "clang/Basic/arm_neon.inc" 2178 #include "clang/Basic/arm_fp16.inc" 2179 #undef GET_NEON_IMMEDIATE_CHECK 2180 } 2181 2182 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2183 } 2184 2185 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2186 switch (BuiltinID) { 2187 default: 2188 return false; 2189 #include "clang/Basic/arm_mve_builtin_sema.inc" 2190 } 2191 } 2192 2193 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2194 CallExpr *TheCall) { 2195 bool Err = false; 2196 switch (BuiltinID) { 2197 default: 2198 return false; 2199 #include "clang/Basic/arm_cde_builtin_sema.inc" 2200 } 2201 2202 if (Err) 2203 return true; 2204 2205 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2206 } 2207 2208 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2209 const Expr *CoprocArg, bool WantCDE) { 2210 if (isConstantEvaluated()) 2211 return false; 2212 2213 // We can't check the value of a dependent argument. 
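  // (For instance, a coprocessor number supplied by a not-yet-instantiated
  // template parameter.)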
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  llvm::APSInt CoprocNoAP;
  bool IsICE = CoprocArg->isIntegerConstantExpr(CoprocNoAP, Context);
  (void)IsICE;
  assert(IsICE && "Coprocessor immediate is not a constant expression");
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
2291 AddrType = Context.getPointerType(AddrType); 2292 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2293 if (PointerArgRes.isInvalid()) 2294 return true; 2295 PointerArg = PointerArgRes.get(); 2296 2297 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 2298 2299 // In general, we allow ints, floats and pointers to be loaded and stored. 2300 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2301 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2302 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2303 << PointerArg->getType() << PointerArg->getSourceRange(); 2304 return true; 2305 } 2306 2307 // But ARM doesn't have instructions to deal with 128-bit versions. 2308 if (Context.getTypeSize(ValType) > MaxWidth) { 2309 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2310 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2311 << PointerArg->getType() << PointerArg->getSourceRange(); 2312 return true; 2313 } 2314 2315 switch (ValType.getObjCLifetime()) { 2316 case Qualifiers::OCL_None: 2317 case Qualifiers::OCL_ExplicitNone: 2318 // okay 2319 break; 2320 2321 case Qualifiers::OCL_Weak: 2322 case Qualifiers::OCL_Strong: 2323 case Qualifiers::OCL_Autoreleasing: 2324 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2325 << ValType << PointerArg->getSourceRange(); 2326 return true; 2327 } 2328 2329 if (IsLdrex) { 2330 TheCall->setType(ValType); 2331 return false; 2332 } 2333 2334 // Initialize the argument to be stored. 2335 ExprResult ValArg = TheCall->getArg(0); 2336 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2337 Context, ValType, /*consume*/ false); 2338 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2339 if (ValArg.isInvalid()) 2340 return true; 2341 TheCall->setArg(0, ValArg.get()); 2342 2343 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2344 // but the custom checker bypasses all default analysis. 2345 TheCall->setType(Context.IntTy); 2346 return false; 2347 } 2348 2349 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2350 CallExpr *TheCall) { 2351 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2352 BuiltinID == ARM::BI__builtin_arm_ldaex || 2353 BuiltinID == ARM::BI__builtin_arm_strex || 2354 BuiltinID == ARM::BI__builtin_arm_stlex) { 2355 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2356 } 2357 2358 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2359 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2360 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2361 } 2362 2363 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2364 BuiltinID == ARM::BI__builtin_arm_wsr64) 2365 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2366 2367 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2368 BuiltinID == ARM::BI__builtin_arm_rsrp || 2369 BuiltinID == ARM::BI__builtin_arm_wsr || 2370 BuiltinID == ARM::BI__builtin_arm_wsrp) 2371 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2372 2373 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2374 return true; 2375 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2376 return true; 2377 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2378 return true; 2379 2380 // For intrinsics which take an immediate value as part of the instruction, 2381 // range check them here. 2382 // FIXME: VFP Intrinsics should error if VFP not present. 
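  // For instance, __builtin_arm_ssat(X, 33) (with a hypothetical operand X) is
  // rejected because the saturation bit position must lie in [1, 32].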
2383 switch (BuiltinID) { 2384 default: return false; 2385 case ARM::BI__builtin_arm_ssat: 2386 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2387 case ARM::BI__builtin_arm_usat: 2388 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2389 case ARM::BI__builtin_arm_ssat16: 2390 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2391 case ARM::BI__builtin_arm_usat16: 2392 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2393 case ARM::BI__builtin_arm_vcvtr_f: 2394 case ARM::BI__builtin_arm_vcvtr_d: 2395 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2396 case ARM::BI__builtin_arm_dmb: 2397 case ARM::BI__builtin_arm_dsb: 2398 case ARM::BI__builtin_arm_isb: 2399 case ARM::BI__builtin_arm_dbg: 2400 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2401 case ARM::BI__builtin_arm_cdp: 2402 case ARM::BI__builtin_arm_cdp2: 2403 case ARM::BI__builtin_arm_mcr: 2404 case ARM::BI__builtin_arm_mcr2: 2405 case ARM::BI__builtin_arm_mrc: 2406 case ARM::BI__builtin_arm_mrc2: 2407 case ARM::BI__builtin_arm_mcrr: 2408 case ARM::BI__builtin_arm_mcrr2: 2409 case ARM::BI__builtin_arm_mrrc: 2410 case ARM::BI__builtin_arm_mrrc2: 2411 case ARM::BI__builtin_arm_ldc: 2412 case ARM::BI__builtin_arm_ldcl: 2413 case ARM::BI__builtin_arm_ldc2: 2414 case ARM::BI__builtin_arm_ldc2l: 2415 case ARM::BI__builtin_arm_stc: 2416 case ARM::BI__builtin_arm_stcl: 2417 case ARM::BI__builtin_arm_stc2: 2418 case ARM::BI__builtin_arm_stc2l: 2419 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2420 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2421 /*WantCDE*/ false); 2422 } 2423 } 2424 2425 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2426 unsigned BuiltinID, 2427 CallExpr *TheCall) { 2428 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2429 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2430 BuiltinID == AArch64::BI__builtin_arm_strex || 2431 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2432 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2433 } 2434 2435 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2436 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2437 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2438 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2439 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2440 } 2441 2442 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2443 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2444 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2445 2446 // Memory Tagging Extensions (MTE) Intrinsics 2447 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2448 BuiltinID == AArch64::BI__builtin_arm_addg || 2449 BuiltinID == AArch64::BI__builtin_arm_gmi || 2450 BuiltinID == AArch64::BI__builtin_arm_ldg || 2451 BuiltinID == AArch64::BI__builtin_arm_stg || 2452 BuiltinID == AArch64::BI__builtin_arm_subp) { 2453 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2454 } 2455 2456 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2457 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2458 BuiltinID == AArch64::BI__builtin_arm_wsr || 2459 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2460 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2461 2462 // Only check the valid encoding range. Any constant in this range would be 2463 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2464 // an exception for incorrect registers. This matches MSVC behavior. 
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
                                       CallExpr *TheCall) {
  assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
          BuiltinID == BPF::BI__builtin_btf_type_id) &&
         "unexpected BPF builtin");

  if (checkArgCount(*this, TheCall, 2))
    return true;

  Expr *Arg;
  if (BuiltinID == BPF::BI__builtin_btf_type_id) {
    // The second argument needs to be a constant int
    llvm::APSInt Value;
    Arg = TheCall->getArg(1);
    if (!Arg->isIntegerConstantExpr(Value, Context)) {
      Diag(Arg->getBeginLoc(), diag::err_btf_type_id_not_const)
          << 2 << Arg->getSourceRange();
      return true;
    }

    TheCall->setType(Context.UnsignedIntTy);
    return false;
  }

  // The first argument needs to be a record field access.
  // If it is an array element access, we delay decision
  // to BPF backend to check whether the access is a
  // field access or not.
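  // A typical (illustrative) use is __builtin_preserve_field_info(S->Member, 0),
  // where the first operand is a field access and the second a constant kind.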
2520 Arg = TheCall->getArg(0); 2521 if (Arg->getType()->getAsPlaceholderType() || 2522 (Arg->IgnoreParens()->getObjectKind() != OK_BitField && 2523 !dyn_cast<MemberExpr>(Arg->IgnoreParens()) && 2524 !dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()))) { 2525 Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_field) 2526 << 1 << Arg->getSourceRange(); 2527 return true; 2528 } 2529 2530 // The second argument needs to be a constant int 2531 Arg = TheCall->getArg(1); 2532 llvm::APSInt Value; 2533 if (!Arg->isIntegerConstantExpr(Value, Context)) { 2534 Diag(Arg->getBeginLoc(), diag::err_preserve_field_info_not_const) 2535 << 2 << Arg->getSourceRange(); 2536 return true; 2537 } 2538 2539 TheCall->setType(Context.UnsignedIntTy); 2540 return false; 2541 } 2542 2543 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2544 struct ArgInfo { 2545 uint8_t OpNum; 2546 bool IsSigned; 2547 uint8_t BitWidth; 2548 uint8_t Align; 2549 }; 2550 struct BuiltinInfo { 2551 unsigned BuiltinID; 2552 ArgInfo Infos[2]; 2553 }; 2554 2555 static BuiltinInfo Infos[] = { 2556 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2557 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2558 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2559 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2560 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2561 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2562 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2563 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2564 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2565 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2566 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2567 2568 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2569 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2570 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2571 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2572 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2573 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2574 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2575 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2576 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2577 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2578 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2579 2580 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2581 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2582 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2583 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2584 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2585 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2586 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2587 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2588 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2589 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2590 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2591 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2592 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2593 { 
Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2594 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2595 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2596 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2597 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2598 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2599 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2600 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2601 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2602 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2603 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2604 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2605 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2606 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2607 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2608 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2609 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2610 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2611 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2612 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2613 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2614 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2615 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2616 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2617 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2618 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2619 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2620 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2621 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2622 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2623 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2624 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2625 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2626 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2627 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2628 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2629 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2630 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2631 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2632 {{ 1, false, 6, 0 }} }, 2633 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2634 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2635 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2636 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2637 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2638 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2639 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2640 {{ 1, false, 5, 0 }} }, 2641 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2642 { 
Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2643 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2644 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2645 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2646 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2647 { 2, false, 5, 0 }} }, 2648 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2649 { 2, false, 6, 0 }} }, 2650 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2651 { 3, false, 5, 0 }} }, 2652 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2653 { 3, false, 6, 0 }} }, 2654 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2655 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2656 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2657 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2658 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2659 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2660 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2661 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2662 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2663 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2664 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2665 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2666 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2667 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2668 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2669 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2670 {{ 2, false, 4, 0 }, 2671 { 3, false, 5, 0 }} }, 2672 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2673 {{ 2, false, 4, 0 }, 2674 { 3, false, 5, 0 }} }, 2675 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2676 {{ 2, false, 4, 0 }, 2677 { 3, false, 5, 0 }} }, 2678 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2679 {{ 2, false, 4, 0 }, 2680 { 3, false, 5, 0 }} }, 2681 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2682 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2683 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2684 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2685 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2686 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2687 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2688 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2689 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2690 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2691 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2692 { 2, false, 5, 0 }} }, 2693 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2694 { 2, false, 6, 0 }} }, 2695 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2696 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2697 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2698 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2699 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, 
false, 5, 0 }} }, 2700 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2701 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2702 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2703 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2704 {{ 1, false, 4, 0 }} }, 2705 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2706 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2707 {{ 1, false, 4, 0 }} }, 2708 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2709 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2710 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2711 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2712 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2713 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2714 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2715 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2716 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2717 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2718 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2719 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2720 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2721 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2722 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2723 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2724 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2725 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2726 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2727 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2728 {{ 3, false, 1, 0 }} }, 2729 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2730 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2731 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2732 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2733 {{ 3, false, 1, 0 }} }, 2734 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2735 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2736 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2737 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2738 {{ 3, false, 1, 0 }} }, 2739 }; 2740 2741 // Use a dynamically initialized static to sort the table exactly once on 2742 // first run. 2743 static const bool SortOnce = 2744 (llvm::sort(Infos, 2745 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2746 return LHS.BuiltinID < RHS.BuiltinID; 2747 }), 2748 true); 2749 (void)SortOnce; 2750 2751 const BuiltinInfo *F = llvm::partition_point( 2752 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2753 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2754 return false; 2755 2756 bool Error = false; 2757 2758 for (const ArgInfo &A : F->Infos) { 2759 // Ignore empty ArgInfo elements. 2760 if (A.BitWidth == 0) 2761 continue; 2762 2763 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2764 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 2765 if (!A.Align) { 2766 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2767 } else { 2768 unsigned M = 1 << A.Align; 2769 Min *= M; 2770 Max *= M; 2771 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2772 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2773 } 2774 } 2775 return Error; 2776 } 2777 2778 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2779 CallExpr *TheCall) { 2780 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2781 } 2782 2783 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 2784 unsigned BuiltinID, CallExpr *TheCall) { 2785 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 2786 CheckMipsBuiltinArgument(BuiltinID, TheCall); 2787 } 2788 2789 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 2790 CallExpr *TheCall) { 2791 2792 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 2793 BuiltinID <= Mips::BI__builtin_mips_lwx) { 2794 if (!TI.hasFeature("dsp")) 2795 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 2796 } 2797 2798 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 2799 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 2800 if (!TI.hasFeature("dspr2")) 2801 return Diag(TheCall->getBeginLoc(), 2802 diag::err_mips_builtin_requires_dspr2); 2803 } 2804 2805 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 2806 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 2807 if (!TI.hasFeature("msa")) 2808 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 2809 } 2810 2811 return false; 2812 } 2813 2814 // CheckMipsBuiltinArgument - Checks the constant value passed to the 2815 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2816 // ordering for DSP is unspecified. MSA is ordered by the data format used 2817 // by the underlying instruction i.e., df/m, df/n and then by size. 2818 // 2819 // FIXME: The size tests here should instead be tablegen'd along with the 2820 // definitions from include/clang/Basic/BuiltinsMips.def. 2821 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 2822 // be too. 2823 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2824 unsigned i = 0, l = 0, u = 0, m = 0; 2825 switch (BuiltinID) { 2826 default: return false; 2827 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 2828 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 2829 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 2830 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 2831 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 2832 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 2833 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 2834 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 2835 // df/m field. 2836 // These intrinsics take an unsigned 3 bit immediate. 
2837 case Mips::BI__builtin_msa_bclri_b: 2838 case Mips::BI__builtin_msa_bnegi_b: 2839 case Mips::BI__builtin_msa_bseti_b: 2840 case Mips::BI__builtin_msa_sat_s_b: 2841 case Mips::BI__builtin_msa_sat_u_b: 2842 case Mips::BI__builtin_msa_slli_b: 2843 case Mips::BI__builtin_msa_srai_b: 2844 case Mips::BI__builtin_msa_srari_b: 2845 case Mips::BI__builtin_msa_srli_b: 2846 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 2847 case Mips::BI__builtin_msa_binsli_b: 2848 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 2849 // These intrinsics take an unsigned 4 bit immediate. 2850 case Mips::BI__builtin_msa_bclri_h: 2851 case Mips::BI__builtin_msa_bnegi_h: 2852 case Mips::BI__builtin_msa_bseti_h: 2853 case Mips::BI__builtin_msa_sat_s_h: 2854 case Mips::BI__builtin_msa_sat_u_h: 2855 case Mips::BI__builtin_msa_slli_h: 2856 case Mips::BI__builtin_msa_srai_h: 2857 case Mips::BI__builtin_msa_srari_h: 2858 case Mips::BI__builtin_msa_srli_h: 2859 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 2860 case Mips::BI__builtin_msa_binsli_h: 2861 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 2862 // These intrinsics take an unsigned 5 bit immediate. 2863 // The first block of intrinsics actually have an unsigned 5 bit field, 2864 // not a df/n field. 2865 case Mips::BI__builtin_msa_cfcmsa: 2866 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 2867 case Mips::BI__builtin_msa_clei_u_b: 2868 case Mips::BI__builtin_msa_clei_u_h: 2869 case Mips::BI__builtin_msa_clei_u_w: 2870 case Mips::BI__builtin_msa_clei_u_d: 2871 case Mips::BI__builtin_msa_clti_u_b: 2872 case Mips::BI__builtin_msa_clti_u_h: 2873 case Mips::BI__builtin_msa_clti_u_w: 2874 case Mips::BI__builtin_msa_clti_u_d: 2875 case Mips::BI__builtin_msa_maxi_u_b: 2876 case Mips::BI__builtin_msa_maxi_u_h: 2877 case Mips::BI__builtin_msa_maxi_u_w: 2878 case Mips::BI__builtin_msa_maxi_u_d: 2879 case Mips::BI__builtin_msa_mini_u_b: 2880 case Mips::BI__builtin_msa_mini_u_h: 2881 case Mips::BI__builtin_msa_mini_u_w: 2882 case Mips::BI__builtin_msa_mini_u_d: 2883 case Mips::BI__builtin_msa_addvi_b: 2884 case Mips::BI__builtin_msa_addvi_h: 2885 case Mips::BI__builtin_msa_addvi_w: 2886 case Mips::BI__builtin_msa_addvi_d: 2887 case Mips::BI__builtin_msa_bclri_w: 2888 case Mips::BI__builtin_msa_bnegi_w: 2889 case Mips::BI__builtin_msa_bseti_w: 2890 case Mips::BI__builtin_msa_sat_s_w: 2891 case Mips::BI__builtin_msa_sat_u_w: 2892 case Mips::BI__builtin_msa_slli_w: 2893 case Mips::BI__builtin_msa_srai_w: 2894 case Mips::BI__builtin_msa_srari_w: 2895 case Mips::BI__builtin_msa_srli_w: 2896 case Mips::BI__builtin_msa_srlri_w: 2897 case Mips::BI__builtin_msa_subvi_b: 2898 case Mips::BI__builtin_msa_subvi_h: 2899 case Mips::BI__builtin_msa_subvi_w: 2900 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 2901 case Mips::BI__builtin_msa_binsli_w: 2902 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 2903 // These intrinsics take an unsigned 6 bit immediate. 
2904 case Mips::BI__builtin_msa_bclri_d: 2905 case Mips::BI__builtin_msa_bnegi_d: 2906 case Mips::BI__builtin_msa_bseti_d: 2907 case Mips::BI__builtin_msa_sat_s_d: 2908 case Mips::BI__builtin_msa_sat_u_d: 2909 case Mips::BI__builtin_msa_slli_d: 2910 case Mips::BI__builtin_msa_srai_d: 2911 case Mips::BI__builtin_msa_srari_d: 2912 case Mips::BI__builtin_msa_srli_d: 2913 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 2914 case Mips::BI__builtin_msa_binsli_d: 2915 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 2916 // These intrinsics take a signed 5 bit immediate. 2917 case Mips::BI__builtin_msa_ceqi_b: 2918 case Mips::BI__builtin_msa_ceqi_h: 2919 case Mips::BI__builtin_msa_ceqi_w: 2920 case Mips::BI__builtin_msa_ceqi_d: 2921 case Mips::BI__builtin_msa_clti_s_b: 2922 case Mips::BI__builtin_msa_clti_s_h: 2923 case Mips::BI__builtin_msa_clti_s_w: 2924 case Mips::BI__builtin_msa_clti_s_d: 2925 case Mips::BI__builtin_msa_clei_s_b: 2926 case Mips::BI__builtin_msa_clei_s_h: 2927 case Mips::BI__builtin_msa_clei_s_w: 2928 case Mips::BI__builtin_msa_clei_s_d: 2929 case Mips::BI__builtin_msa_maxi_s_b: 2930 case Mips::BI__builtin_msa_maxi_s_h: 2931 case Mips::BI__builtin_msa_maxi_s_w: 2932 case Mips::BI__builtin_msa_maxi_s_d: 2933 case Mips::BI__builtin_msa_mini_s_b: 2934 case Mips::BI__builtin_msa_mini_s_h: 2935 case Mips::BI__builtin_msa_mini_s_w: 2936 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 2937 // These intrinsics take an unsigned 8 bit immediate. 2938 case Mips::BI__builtin_msa_andi_b: 2939 case Mips::BI__builtin_msa_nori_b: 2940 case Mips::BI__builtin_msa_ori_b: 2941 case Mips::BI__builtin_msa_shf_b: 2942 case Mips::BI__builtin_msa_shf_h: 2943 case Mips::BI__builtin_msa_shf_w: 2944 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 2945 case Mips::BI__builtin_msa_bseli_b: 2946 case Mips::BI__builtin_msa_bmnzi_b: 2947 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 2948 // df/n format 2949 // These intrinsics take an unsigned 4 bit immediate. 2950 case Mips::BI__builtin_msa_copy_s_b: 2951 case Mips::BI__builtin_msa_copy_u_b: 2952 case Mips::BI__builtin_msa_insve_b: 2953 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 2954 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 2955 // These intrinsics take an unsigned 3 bit immediate. 2956 case Mips::BI__builtin_msa_copy_s_h: 2957 case Mips::BI__builtin_msa_copy_u_h: 2958 case Mips::BI__builtin_msa_insve_h: 2959 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 2960 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 2961 // These intrinsics take an unsigned 2 bit immediate. 2962 case Mips::BI__builtin_msa_copy_s_w: 2963 case Mips::BI__builtin_msa_copy_u_w: 2964 case Mips::BI__builtin_msa_insve_w: 2965 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 2966 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 2967 // These intrinsics take an unsigned 1 bit immediate. 2968 case Mips::BI__builtin_msa_copy_s_d: 2969 case Mips::BI__builtin_msa_copy_u_d: 2970 case Mips::BI__builtin_msa_insve_d: 2971 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 2972 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 2973 // Memory offsets and immediate loads. 2974 // These intrinsics take a signed 10 bit immediate. 
2975 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 2976 case Mips::BI__builtin_msa_ldi_h: 2977 case Mips::BI__builtin_msa_ldi_w: 2978 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 2979 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 2980 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 2981 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 2982 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 2983 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 2984 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 2985 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 2986 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 2987 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 2988 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 2989 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 2990 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 2991 } 2992 2993 if (!m) 2994 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 2995 2996 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 2997 SemaBuiltinConstantArgMultiple(TheCall, i, m); 2998 } 2999 3000 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3001 CallExpr *TheCall) { 3002 unsigned i = 0, l = 0, u = 0; 3003 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3004 BuiltinID == PPC::BI__builtin_divdeu || 3005 BuiltinID == PPC::BI__builtin_bpermd; 3006 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3007 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3008 BuiltinID == PPC::BI__builtin_divweu || 3009 BuiltinID == PPC::BI__builtin_divde || 3010 BuiltinID == PPC::BI__builtin_divdeu; 3011 3012 if (Is64BitBltin && !IsTarget64Bit) 3013 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3014 << TheCall->getSourceRange(); 3015 3016 if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) || 3017 (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd"))) 3018 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3019 << TheCall->getSourceRange(); 3020 3021 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3022 if (!TI.hasFeature("vsx")) 3023 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3024 << TheCall->getSourceRange(); 3025 return false; 3026 }; 3027 3028 switch (BuiltinID) { 3029 default: return false; 3030 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3031 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3032 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3033 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3034 case PPC::BI__builtin_altivec_dss: 3035 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3036 case PPC::BI__builtin_tbegin: 3037 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3038 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3039 case PPC::BI__builtin_tabortwc: 3040 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3041 case PPC::BI__builtin_tabortwci: 3042 case PPC::BI__builtin_tabortdci: 3043 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3044 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3045 case PPC::BI__builtin_altivec_dst: 3046 case PPC::BI__builtin_altivec_dstt: 3047 case PPC::BI__builtin_altivec_dstst: 
3048 case PPC::BI__builtin_altivec_dststt:
3049 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
3050 case PPC::BI__builtin_vsx_xxpermdi:
3051 case PPC::BI__builtin_vsx_xxsldwi:
3052 return SemaBuiltinVSX(TheCall);
3053 case PPC::BI__builtin_unpack_vector_int128:
3054 return SemaVSXCheck(TheCall) ||
3055 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3056 case PPC::BI__builtin_pack_vector_int128:
3057 return SemaVSXCheck(TheCall);
3058 }
3059 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3060 }
3061
3062 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
3063 CallExpr *TheCall) {
3064 switch (BuiltinID) {
3065 case AMDGPU::BI__builtin_amdgcn_fence: {
3066 ExprResult Arg = TheCall->getArg(0);
3067 auto ArgExpr = Arg.get();
3068 Expr::EvalResult ArgResult;
3069
3070 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
3071 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
3072 << ArgExpr->getType();
3073 int ord = ArgResult.Val.getInt().getZExtValue();
3074
3075 // Check the validity of the memory ordering as per the C11 / C++11 memory model.
3076 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
3077 case llvm::AtomicOrderingCABI::acquire:
3078 case llvm::AtomicOrderingCABI::release:
3079 case llvm::AtomicOrderingCABI::acq_rel:
3080 case llvm::AtomicOrderingCABI::seq_cst:
3081 break;
3082 default: {
3083 return Diag(ArgExpr->getBeginLoc(),
3084 diag::warn_atomic_op_has_invalid_memory_order)
3085 << ArgExpr->getSourceRange();
3086 }
3087 }
3088
3089 Arg = TheCall->getArg(1);
3090 ArgExpr = Arg.get();
3091 Expr::EvalResult ArgResult1;
3092 // Check that the sync scope is a constant literal.
3093 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen,
3094 Context))
3095 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
3096 << ArgExpr->getType();
3097 } break;
3098 }
3099 return false;
3100 }
3101
3102 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
3103 CallExpr *TheCall) {
3104 if (BuiltinID == SystemZ::BI__builtin_tabort) {
3105 Expr *Arg = TheCall->getArg(0);
3106 llvm::APSInt AbortCode(32);
3107 if (Arg->isIntegerConstantExpr(AbortCode, Context) &&
3108 AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256)
3109 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
3110 << Arg->getSourceRange();
3111 }
3112
3113 // For intrinsics which take an immediate value as part of the instruction,
3114 // range check them here.
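  // Illustrative note: each case below selects the immediate operand index 'i'
  // and the inclusive range [l, u] that SemaBuiltinConstantArgRange enforces.
  // For example, __builtin_s390_lcbb uses i = 1, l = 0, u = 15, so a sketched
  // user-side call such as
  //   __builtin_s390_lcbb(Ptr, 16);   // boundary immediate outside [0, 15]
  // is diagnosed, while any value in 0..15 is accepted.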
3115 unsigned i = 0, l = 0, u = 0; 3116 switch (BuiltinID) { 3117 default: return false; 3118 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3119 case SystemZ::BI__builtin_s390_verimb: 3120 case SystemZ::BI__builtin_s390_verimh: 3121 case SystemZ::BI__builtin_s390_verimf: 3122 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3123 case SystemZ::BI__builtin_s390_vfaeb: 3124 case SystemZ::BI__builtin_s390_vfaeh: 3125 case SystemZ::BI__builtin_s390_vfaef: 3126 case SystemZ::BI__builtin_s390_vfaebs: 3127 case SystemZ::BI__builtin_s390_vfaehs: 3128 case SystemZ::BI__builtin_s390_vfaefs: 3129 case SystemZ::BI__builtin_s390_vfaezb: 3130 case SystemZ::BI__builtin_s390_vfaezh: 3131 case SystemZ::BI__builtin_s390_vfaezf: 3132 case SystemZ::BI__builtin_s390_vfaezbs: 3133 case SystemZ::BI__builtin_s390_vfaezhs: 3134 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3135 case SystemZ::BI__builtin_s390_vfisb: 3136 case SystemZ::BI__builtin_s390_vfidb: 3137 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3138 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3139 case SystemZ::BI__builtin_s390_vftcisb: 3140 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3141 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3142 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3143 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3144 case SystemZ::BI__builtin_s390_vstrcb: 3145 case SystemZ::BI__builtin_s390_vstrch: 3146 case SystemZ::BI__builtin_s390_vstrcf: 3147 case SystemZ::BI__builtin_s390_vstrczb: 3148 case SystemZ::BI__builtin_s390_vstrczh: 3149 case SystemZ::BI__builtin_s390_vstrczf: 3150 case SystemZ::BI__builtin_s390_vstrcbs: 3151 case SystemZ::BI__builtin_s390_vstrchs: 3152 case SystemZ::BI__builtin_s390_vstrcfs: 3153 case SystemZ::BI__builtin_s390_vstrczbs: 3154 case SystemZ::BI__builtin_s390_vstrczhs: 3155 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3156 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3157 case SystemZ::BI__builtin_s390_vfminsb: 3158 case SystemZ::BI__builtin_s390_vfmaxsb: 3159 case SystemZ::BI__builtin_s390_vfmindb: 3160 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3161 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3162 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3163 } 3164 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3165 } 3166 3167 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3168 /// This checks that the target supports __builtin_cpu_supports and 3169 /// that the string argument is constant and valid. 3170 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3171 CallExpr *TheCall) { 3172 Expr *Arg = TheCall->getArg(0); 3173 3174 // Check if the argument is a string literal. 3175 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3176 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3177 << Arg->getSourceRange(); 3178 3179 // Check the contents of the string. 3180 StringRef Feature = 3181 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3182 if (!TI.validateCpuSupports(Feature)) 3183 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3184 << Arg->getSourceRange(); 3185 return false; 3186 } 3187 3188 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 
3189 /// This checks that the target supports __builtin_cpu_is and 3190 /// that the string argument is constant and valid. 3191 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3192 Expr *Arg = TheCall->getArg(0); 3193 3194 // Check if the argument is a string literal. 3195 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3196 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3197 << Arg->getSourceRange(); 3198 3199 // Check the contents of the string. 3200 StringRef Feature = 3201 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3202 if (!TI.validateCpuIs(Feature)) 3203 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3204 << Arg->getSourceRange(); 3205 return false; 3206 } 3207 3208 // Check if the rounding mode is legal. 3209 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3210 // Indicates if this instruction has rounding control or just SAE. 3211 bool HasRC = false; 3212 3213 unsigned ArgNum = 0; 3214 switch (BuiltinID) { 3215 default: 3216 return false; 3217 case X86::BI__builtin_ia32_vcvttsd2si32: 3218 case X86::BI__builtin_ia32_vcvttsd2si64: 3219 case X86::BI__builtin_ia32_vcvttsd2usi32: 3220 case X86::BI__builtin_ia32_vcvttsd2usi64: 3221 case X86::BI__builtin_ia32_vcvttss2si32: 3222 case X86::BI__builtin_ia32_vcvttss2si64: 3223 case X86::BI__builtin_ia32_vcvttss2usi32: 3224 case X86::BI__builtin_ia32_vcvttss2usi64: 3225 ArgNum = 1; 3226 break; 3227 case X86::BI__builtin_ia32_maxpd512: 3228 case X86::BI__builtin_ia32_maxps512: 3229 case X86::BI__builtin_ia32_minpd512: 3230 case X86::BI__builtin_ia32_minps512: 3231 ArgNum = 2; 3232 break; 3233 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3234 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3235 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3236 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3237 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3238 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3239 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3240 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3241 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3242 case X86::BI__builtin_ia32_exp2pd_mask: 3243 case X86::BI__builtin_ia32_exp2ps_mask: 3244 case X86::BI__builtin_ia32_getexppd512_mask: 3245 case X86::BI__builtin_ia32_getexpps512_mask: 3246 case X86::BI__builtin_ia32_rcp28pd_mask: 3247 case X86::BI__builtin_ia32_rcp28ps_mask: 3248 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3249 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3250 case X86::BI__builtin_ia32_vcomisd: 3251 case X86::BI__builtin_ia32_vcomiss: 3252 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3253 ArgNum = 3; 3254 break; 3255 case X86::BI__builtin_ia32_cmppd512_mask: 3256 case X86::BI__builtin_ia32_cmpps512_mask: 3257 case X86::BI__builtin_ia32_cmpsd_mask: 3258 case X86::BI__builtin_ia32_cmpss_mask: 3259 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3260 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3261 case X86::BI__builtin_ia32_getexpss128_round_mask: 3262 case X86::BI__builtin_ia32_getmantpd512_mask: 3263 case X86::BI__builtin_ia32_getmantps512_mask: 3264 case X86::BI__builtin_ia32_maxsd_round_mask: 3265 case X86::BI__builtin_ia32_maxss_round_mask: 3266 case X86::BI__builtin_ia32_minsd_round_mask: 3267 case X86::BI__builtin_ia32_minss_round_mask: 3268 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3269 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3270 case X86::BI__builtin_ia32_reducepd512_mask: 3271 case X86::BI__builtin_ia32_reduceps512_mask: 3272 
case X86::BI__builtin_ia32_rndscalepd_mask: 3273 case X86::BI__builtin_ia32_rndscaleps_mask: 3274 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3275 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3276 ArgNum = 4; 3277 break; 3278 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3279 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3280 case X86::BI__builtin_ia32_fixupimmps512_mask: 3281 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3282 case X86::BI__builtin_ia32_fixupimmsd_mask: 3283 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3284 case X86::BI__builtin_ia32_fixupimmss_mask: 3285 case X86::BI__builtin_ia32_fixupimmss_maskz: 3286 case X86::BI__builtin_ia32_getmantsd_round_mask: 3287 case X86::BI__builtin_ia32_getmantss_round_mask: 3288 case X86::BI__builtin_ia32_rangepd512_mask: 3289 case X86::BI__builtin_ia32_rangeps512_mask: 3290 case X86::BI__builtin_ia32_rangesd128_round_mask: 3291 case X86::BI__builtin_ia32_rangess128_round_mask: 3292 case X86::BI__builtin_ia32_reducesd_mask: 3293 case X86::BI__builtin_ia32_reducess_mask: 3294 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3295 case X86::BI__builtin_ia32_rndscaless_round_mask: 3296 ArgNum = 5; 3297 break; 3298 case X86::BI__builtin_ia32_vcvtsd2si64: 3299 case X86::BI__builtin_ia32_vcvtsd2si32: 3300 case X86::BI__builtin_ia32_vcvtsd2usi32: 3301 case X86::BI__builtin_ia32_vcvtsd2usi64: 3302 case X86::BI__builtin_ia32_vcvtss2si32: 3303 case X86::BI__builtin_ia32_vcvtss2si64: 3304 case X86::BI__builtin_ia32_vcvtss2usi32: 3305 case X86::BI__builtin_ia32_vcvtss2usi64: 3306 case X86::BI__builtin_ia32_sqrtpd512: 3307 case X86::BI__builtin_ia32_sqrtps512: 3308 ArgNum = 1; 3309 HasRC = true; 3310 break; 3311 case X86::BI__builtin_ia32_addpd512: 3312 case X86::BI__builtin_ia32_addps512: 3313 case X86::BI__builtin_ia32_divpd512: 3314 case X86::BI__builtin_ia32_divps512: 3315 case X86::BI__builtin_ia32_mulpd512: 3316 case X86::BI__builtin_ia32_mulps512: 3317 case X86::BI__builtin_ia32_subpd512: 3318 case X86::BI__builtin_ia32_subps512: 3319 case X86::BI__builtin_ia32_cvtsi2sd64: 3320 case X86::BI__builtin_ia32_cvtsi2ss32: 3321 case X86::BI__builtin_ia32_cvtsi2ss64: 3322 case X86::BI__builtin_ia32_cvtusi2sd64: 3323 case X86::BI__builtin_ia32_cvtusi2ss32: 3324 case X86::BI__builtin_ia32_cvtusi2ss64: 3325 ArgNum = 2; 3326 HasRC = true; 3327 break; 3328 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3329 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3330 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3331 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3332 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3333 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3334 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3335 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3336 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3337 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3338 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3339 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3340 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3341 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3342 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3343 ArgNum = 3; 3344 HasRC = true; 3345 break; 3346 case X86::BI__builtin_ia32_addss_round_mask: 3347 case X86::BI__builtin_ia32_addsd_round_mask: 3348 case X86::BI__builtin_ia32_divss_round_mask: 3349 case X86::BI__builtin_ia32_divsd_round_mask: 3350 case X86::BI__builtin_ia32_mulss_round_mask: 3351 case X86::BI__builtin_ia32_mulsd_round_mask: 3352 case X86::BI__builtin_ia32_subss_round_mask: 3353 case X86::BI__builtin_ia32_subsd_round_mask: 3354 case 
X86::BI__builtin_ia32_scalefpd512_mask:
3355 case X86::BI__builtin_ia32_scalefps512_mask:
3356 case X86::BI__builtin_ia32_scalefsd_round_mask:
3357 case X86::BI__builtin_ia32_scalefss_round_mask:
3358 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
3359 case X86::BI__builtin_ia32_sqrtsd_round_mask:
3360 case X86::BI__builtin_ia32_sqrtss_round_mask:
3361 case X86::BI__builtin_ia32_vfmaddsd3_mask:
3362 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
3363 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
3364 case X86::BI__builtin_ia32_vfmaddss3_mask:
3365 case X86::BI__builtin_ia32_vfmaddss3_maskz:
3366 case X86::BI__builtin_ia32_vfmaddss3_mask3:
3367 case X86::BI__builtin_ia32_vfmaddpd512_mask:
3368 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
3369 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
3370 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
3371 case X86::BI__builtin_ia32_vfmaddps512_mask:
3372 case X86::BI__builtin_ia32_vfmaddps512_maskz:
3373 case X86::BI__builtin_ia32_vfmaddps512_mask3:
3374 case X86::BI__builtin_ia32_vfmsubps512_mask3:
3375 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
3376 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
3377 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
3378 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
3379 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
3380 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
3381 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
3382 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
3383 ArgNum = 4;
3384 HasRC = true;
3385 break;
3386 }
3387
3388 llvm::APSInt Result;
3389
3390 // We can't check the value of a dependent argument.
3391 Expr *Arg = TheCall->getArg(ArgNum);
3392 if (Arg->isTypeDependent() || Arg->isValueDependent())
3393 return false;
3394
3395 // Check constant-ness first.
3396 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3397 return true;
3398
3399 // Make sure the rounding mode is either ROUND_CUR_DIRECTION or the
3400 // ROUND_NO_EXC bit is set. If the intrinsic has rounding control (bits 1:0),
3401 // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
3402 // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
3403 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
3404 Result == 8/*ROUND_NO_EXC*/ ||
3405 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
3406 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
3407 return false;
3408
3409 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
3410 << Arg->getSourceRange();
3411 }
3412
3413 // Check if the gather/scatter scale is legal.
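// Illustrative note: the scale immediate is the address multiplier encoded in
// the gather/scatter instruction, so only the values 1, 2, 4 and 8 are legal.
// A sketched (hypothetical) misuse such as
//   __builtin_ia32_gathersiv8df(Src, Base, Index, Mask, /*scale=*/3);
// is rejected with err_x86_builtin_invalid_scale below, while scale 8 passes.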
3414 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3415 CallExpr *TheCall) { 3416 unsigned ArgNum = 0; 3417 switch (BuiltinID) { 3418 default: 3419 return false; 3420 case X86::BI__builtin_ia32_gatherpfdpd: 3421 case X86::BI__builtin_ia32_gatherpfdps: 3422 case X86::BI__builtin_ia32_gatherpfqpd: 3423 case X86::BI__builtin_ia32_gatherpfqps: 3424 case X86::BI__builtin_ia32_scatterpfdpd: 3425 case X86::BI__builtin_ia32_scatterpfdps: 3426 case X86::BI__builtin_ia32_scatterpfqpd: 3427 case X86::BI__builtin_ia32_scatterpfqps: 3428 ArgNum = 3; 3429 break; 3430 case X86::BI__builtin_ia32_gatherd_pd: 3431 case X86::BI__builtin_ia32_gatherd_pd256: 3432 case X86::BI__builtin_ia32_gatherq_pd: 3433 case X86::BI__builtin_ia32_gatherq_pd256: 3434 case X86::BI__builtin_ia32_gatherd_ps: 3435 case X86::BI__builtin_ia32_gatherd_ps256: 3436 case X86::BI__builtin_ia32_gatherq_ps: 3437 case X86::BI__builtin_ia32_gatherq_ps256: 3438 case X86::BI__builtin_ia32_gatherd_q: 3439 case X86::BI__builtin_ia32_gatherd_q256: 3440 case X86::BI__builtin_ia32_gatherq_q: 3441 case X86::BI__builtin_ia32_gatherq_q256: 3442 case X86::BI__builtin_ia32_gatherd_d: 3443 case X86::BI__builtin_ia32_gatherd_d256: 3444 case X86::BI__builtin_ia32_gatherq_d: 3445 case X86::BI__builtin_ia32_gatherq_d256: 3446 case X86::BI__builtin_ia32_gather3div2df: 3447 case X86::BI__builtin_ia32_gather3div2di: 3448 case X86::BI__builtin_ia32_gather3div4df: 3449 case X86::BI__builtin_ia32_gather3div4di: 3450 case X86::BI__builtin_ia32_gather3div4sf: 3451 case X86::BI__builtin_ia32_gather3div4si: 3452 case X86::BI__builtin_ia32_gather3div8sf: 3453 case X86::BI__builtin_ia32_gather3div8si: 3454 case X86::BI__builtin_ia32_gather3siv2df: 3455 case X86::BI__builtin_ia32_gather3siv2di: 3456 case X86::BI__builtin_ia32_gather3siv4df: 3457 case X86::BI__builtin_ia32_gather3siv4di: 3458 case X86::BI__builtin_ia32_gather3siv4sf: 3459 case X86::BI__builtin_ia32_gather3siv4si: 3460 case X86::BI__builtin_ia32_gather3siv8sf: 3461 case X86::BI__builtin_ia32_gather3siv8si: 3462 case X86::BI__builtin_ia32_gathersiv8df: 3463 case X86::BI__builtin_ia32_gathersiv16sf: 3464 case X86::BI__builtin_ia32_gatherdiv8df: 3465 case X86::BI__builtin_ia32_gatherdiv16sf: 3466 case X86::BI__builtin_ia32_gathersiv8di: 3467 case X86::BI__builtin_ia32_gathersiv16si: 3468 case X86::BI__builtin_ia32_gatherdiv8di: 3469 case X86::BI__builtin_ia32_gatherdiv16si: 3470 case X86::BI__builtin_ia32_scatterdiv2df: 3471 case X86::BI__builtin_ia32_scatterdiv2di: 3472 case X86::BI__builtin_ia32_scatterdiv4df: 3473 case X86::BI__builtin_ia32_scatterdiv4di: 3474 case X86::BI__builtin_ia32_scatterdiv4sf: 3475 case X86::BI__builtin_ia32_scatterdiv4si: 3476 case X86::BI__builtin_ia32_scatterdiv8sf: 3477 case X86::BI__builtin_ia32_scatterdiv8si: 3478 case X86::BI__builtin_ia32_scattersiv2df: 3479 case X86::BI__builtin_ia32_scattersiv2di: 3480 case X86::BI__builtin_ia32_scattersiv4df: 3481 case X86::BI__builtin_ia32_scattersiv4di: 3482 case X86::BI__builtin_ia32_scattersiv4sf: 3483 case X86::BI__builtin_ia32_scattersiv4si: 3484 case X86::BI__builtin_ia32_scattersiv8sf: 3485 case X86::BI__builtin_ia32_scattersiv8si: 3486 case X86::BI__builtin_ia32_scattersiv8df: 3487 case X86::BI__builtin_ia32_scattersiv16sf: 3488 case X86::BI__builtin_ia32_scatterdiv8df: 3489 case X86::BI__builtin_ia32_scatterdiv16sf: 3490 case X86::BI__builtin_ia32_scattersiv8di: 3491 case X86::BI__builtin_ia32_scattersiv16si: 3492 case X86::BI__builtin_ia32_scatterdiv8di: 3493 case X86::BI__builtin_ia32_scatterdiv16si: 3494 
ArgNum = 4;
3495 break;
3496 }
3497
3498 llvm::APSInt Result;
3499
3500 // We can't check the value of a dependent argument.
3501 Expr *Arg = TheCall->getArg(ArgNum);
3502 if (Arg->isTypeDependent() || Arg->isValueDependent())
3503 return false;
3504
3505 // Check constant-ness first.
3506 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3507 return true;
3508
3509 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
3510 return false;
3511
3512 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
3513 << Arg->getSourceRange();
3514 }
3515
3516 static bool isX86_32Builtin(unsigned BuiltinID) {
3517 // These builtins only work on x86-32 targets.
3518 switch (BuiltinID) {
3519 case X86::BI__builtin_ia32_readeflags_u32:
3520 case X86::BI__builtin_ia32_writeeflags_u32:
3521 return true;
3522 }
3523
3524 return false;
3525 }
3526
3527 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3528 CallExpr *TheCall) {
3529 if (BuiltinID == X86::BI__builtin_cpu_supports)
3530 return SemaBuiltinCpuSupports(*this, TI, TheCall);
3531
3532 if (BuiltinID == X86::BI__builtin_cpu_is)
3533 return SemaBuiltinCpuIs(*this, TI, TheCall);
3534
3535 // Check for 32-bit only builtins on a 64-bit target.
3536 const llvm::Triple &TT = TI.getTriple();
3537 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
3538 return Diag(TheCall->getCallee()->getBeginLoc(),
3539 diag::err_32_bit_builtin_64_bit_tgt);
3540
3541 // If the intrinsic has rounding or SAE, make sure it's valid.
3542 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
3543 return true;
3544
3545 // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
3546 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
3547 return true;
3548
3549 // For intrinsics which take an immediate value as part of the instruction,
3550 // range check them here.
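  // Illustrative note: as for the other targets, each case picks the immediate
  // operand 'i' and its legal range [l, u]. For example, X86::BI_mm_prefetch
  // uses i = 1, l = 0, u = 7, so a sketched call like
  //   _mm_prefetch(Ptr, 9);   // locality hint outside [0, 7]
  // is flagged (via the default-error warning issued by
  // SemaBuiltinConstantArgRange with RangeIsError == false, see below).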
3551 int i = 0, l = 0, u = 0; 3552 switch (BuiltinID) { 3553 default: 3554 return false; 3555 case X86::BI__builtin_ia32_vec_ext_v2si: 3556 case X86::BI__builtin_ia32_vec_ext_v2di: 3557 case X86::BI__builtin_ia32_vextractf128_pd256: 3558 case X86::BI__builtin_ia32_vextractf128_ps256: 3559 case X86::BI__builtin_ia32_vextractf128_si256: 3560 case X86::BI__builtin_ia32_extract128i256: 3561 case X86::BI__builtin_ia32_extractf64x4_mask: 3562 case X86::BI__builtin_ia32_extracti64x4_mask: 3563 case X86::BI__builtin_ia32_extractf32x8_mask: 3564 case X86::BI__builtin_ia32_extracti32x8_mask: 3565 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3566 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3567 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3568 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3569 i = 1; l = 0; u = 1; 3570 break; 3571 case X86::BI__builtin_ia32_vec_set_v2di: 3572 case X86::BI__builtin_ia32_vinsertf128_pd256: 3573 case X86::BI__builtin_ia32_vinsertf128_ps256: 3574 case X86::BI__builtin_ia32_vinsertf128_si256: 3575 case X86::BI__builtin_ia32_insert128i256: 3576 case X86::BI__builtin_ia32_insertf32x8: 3577 case X86::BI__builtin_ia32_inserti32x8: 3578 case X86::BI__builtin_ia32_insertf64x4: 3579 case X86::BI__builtin_ia32_inserti64x4: 3580 case X86::BI__builtin_ia32_insertf64x2_256: 3581 case X86::BI__builtin_ia32_inserti64x2_256: 3582 case X86::BI__builtin_ia32_insertf32x4_256: 3583 case X86::BI__builtin_ia32_inserti32x4_256: 3584 i = 2; l = 0; u = 1; 3585 break; 3586 case X86::BI__builtin_ia32_vpermilpd: 3587 case X86::BI__builtin_ia32_vec_ext_v4hi: 3588 case X86::BI__builtin_ia32_vec_ext_v4si: 3589 case X86::BI__builtin_ia32_vec_ext_v4sf: 3590 case X86::BI__builtin_ia32_vec_ext_v4di: 3591 case X86::BI__builtin_ia32_extractf32x4_mask: 3592 case X86::BI__builtin_ia32_extracti32x4_mask: 3593 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3594 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3595 i = 1; l = 0; u = 3; 3596 break; 3597 case X86::BI_mm_prefetch: 3598 case X86::BI__builtin_ia32_vec_ext_v8hi: 3599 case X86::BI__builtin_ia32_vec_ext_v8si: 3600 i = 1; l = 0; u = 7; 3601 break; 3602 case X86::BI__builtin_ia32_sha1rnds4: 3603 case X86::BI__builtin_ia32_blendpd: 3604 case X86::BI__builtin_ia32_shufpd: 3605 case X86::BI__builtin_ia32_vec_set_v4hi: 3606 case X86::BI__builtin_ia32_vec_set_v4si: 3607 case X86::BI__builtin_ia32_vec_set_v4di: 3608 case X86::BI__builtin_ia32_shuf_f32x4_256: 3609 case X86::BI__builtin_ia32_shuf_f64x2_256: 3610 case X86::BI__builtin_ia32_shuf_i32x4_256: 3611 case X86::BI__builtin_ia32_shuf_i64x2_256: 3612 case X86::BI__builtin_ia32_insertf64x2_512: 3613 case X86::BI__builtin_ia32_inserti64x2_512: 3614 case X86::BI__builtin_ia32_insertf32x4: 3615 case X86::BI__builtin_ia32_inserti32x4: 3616 i = 2; l = 0; u = 3; 3617 break; 3618 case X86::BI__builtin_ia32_vpermil2pd: 3619 case X86::BI__builtin_ia32_vpermil2pd256: 3620 case X86::BI__builtin_ia32_vpermil2ps: 3621 case X86::BI__builtin_ia32_vpermil2ps256: 3622 i = 3; l = 0; u = 3; 3623 break; 3624 case X86::BI__builtin_ia32_cmpb128_mask: 3625 case X86::BI__builtin_ia32_cmpw128_mask: 3626 case X86::BI__builtin_ia32_cmpd128_mask: 3627 case X86::BI__builtin_ia32_cmpq128_mask: 3628 case X86::BI__builtin_ia32_cmpb256_mask: 3629 case X86::BI__builtin_ia32_cmpw256_mask: 3630 case X86::BI__builtin_ia32_cmpd256_mask: 3631 case X86::BI__builtin_ia32_cmpq256_mask: 3632 case X86::BI__builtin_ia32_cmpb512_mask: 3633 case X86::BI__builtin_ia32_cmpw512_mask: 3634 case X86::BI__builtin_ia32_cmpd512_mask: 
3635 case X86::BI__builtin_ia32_cmpq512_mask: 3636 case X86::BI__builtin_ia32_ucmpb128_mask: 3637 case X86::BI__builtin_ia32_ucmpw128_mask: 3638 case X86::BI__builtin_ia32_ucmpd128_mask: 3639 case X86::BI__builtin_ia32_ucmpq128_mask: 3640 case X86::BI__builtin_ia32_ucmpb256_mask: 3641 case X86::BI__builtin_ia32_ucmpw256_mask: 3642 case X86::BI__builtin_ia32_ucmpd256_mask: 3643 case X86::BI__builtin_ia32_ucmpq256_mask: 3644 case X86::BI__builtin_ia32_ucmpb512_mask: 3645 case X86::BI__builtin_ia32_ucmpw512_mask: 3646 case X86::BI__builtin_ia32_ucmpd512_mask: 3647 case X86::BI__builtin_ia32_ucmpq512_mask: 3648 case X86::BI__builtin_ia32_vpcomub: 3649 case X86::BI__builtin_ia32_vpcomuw: 3650 case X86::BI__builtin_ia32_vpcomud: 3651 case X86::BI__builtin_ia32_vpcomuq: 3652 case X86::BI__builtin_ia32_vpcomb: 3653 case X86::BI__builtin_ia32_vpcomw: 3654 case X86::BI__builtin_ia32_vpcomd: 3655 case X86::BI__builtin_ia32_vpcomq: 3656 case X86::BI__builtin_ia32_vec_set_v8hi: 3657 case X86::BI__builtin_ia32_vec_set_v8si: 3658 i = 2; l = 0; u = 7; 3659 break; 3660 case X86::BI__builtin_ia32_vpermilpd256: 3661 case X86::BI__builtin_ia32_roundps: 3662 case X86::BI__builtin_ia32_roundpd: 3663 case X86::BI__builtin_ia32_roundps256: 3664 case X86::BI__builtin_ia32_roundpd256: 3665 case X86::BI__builtin_ia32_getmantpd128_mask: 3666 case X86::BI__builtin_ia32_getmantpd256_mask: 3667 case X86::BI__builtin_ia32_getmantps128_mask: 3668 case X86::BI__builtin_ia32_getmantps256_mask: 3669 case X86::BI__builtin_ia32_getmantpd512_mask: 3670 case X86::BI__builtin_ia32_getmantps512_mask: 3671 case X86::BI__builtin_ia32_vec_ext_v16qi: 3672 case X86::BI__builtin_ia32_vec_ext_v16hi: 3673 i = 1; l = 0; u = 15; 3674 break; 3675 case X86::BI__builtin_ia32_pblendd128: 3676 case X86::BI__builtin_ia32_blendps: 3677 case X86::BI__builtin_ia32_blendpd256: 3678 case X86::BI__builtin_ia32_shufpd256: 3679 case X86::BI__builtin_ia32_roundss: 3680 case X86::BI__builtin_ia32_roundsd: 3681 case X86::BI__builtin_ia32_rangepd128_mask: 3682 case X86::BI__builtin_ia32_rangepd256_mask: 3683 case X86::BI__builtin_ia32_rangepd512_mask: 3684 case X86::BI__builtin_ia32_rangeps128_mask: 3685 case X86::BI__builtin_ia32_rangeps256_mask: 3686 case X86::BI__builtin_ia32_rangeps512_mask: 3687 case X86::BI__builtin_ia32_getmantsd_round_mask: 3688 case X86::BI__builtin_ia32_getmantss_round_mask: 3689 case X86::BI__builtin_ia32_vec_set_v16qi: 3690 case X86::BI__builtin_ia32_vec_set_v16hi: 3691 i = 2; l = 0; u = 15; 3692 break; 3693 case X86::BI__builtin_ia32_vec_ext_v32qi: 3694 i = 1; l = 0; u = 31; 3695 break; 3696 case X86::BI__builtin_ia32_cmpps: 3697 case X86::BI__builtin_ia32_cmpss: 3698 case X86::BI__builtin_ia32_cmppd: 3699 case X86::BI__builtin_ia32_cmpsd: 3700 case X86::BI__builtin_ia32_cmpps256: 3701 case X86::BI__builtin_ia32_cmppd256: 3702 case X86::BI__builtin_ia32_cmpps128_mask: 3703 case X86::BI__builtin_ia32_cmppd128_mask: 3704 case X86::BI__builtin_ia32_cmpps256_mask: 3705 case X86::BI__builtin_ia32_cmppd256_mask: 3706 case X86::BI__builtin_ia32_cmpps512_mask: 3707 case X86::BI__builtin_ia32_cmppd512_mask: 3708 case X86::BI__builtin_ia32_cmpsd_mask: 3709 case X86::BI__builtin_ia32_cmpss_mask: 3710 case X86::BI__builtin_ia32_vec_set_v32qi: 3711 i = 2; l = 0; u = 31; 3712 break; 3713 case X86::BI__builtin_ia32_permdf256: 3714 case X86::BI__builtin_ia32_permdi256: 3715 case X86::BI__builtin_ia32_permdf512: 3716 case X86::BI__builtin_ia32_permdi512: 3717 case X86::BI__builtin_ia32_vpermilps: 3718 case X86::BI__builtin_ia32_vpermilps256: 
3719 case X86::BI__builtin_ia32_vpermilpd512: 3720 case X86::BI__builtin_ia32_vpermilps512: 3721 case X86::BI__builtin_ia32_pshufd: 3722 case X86::BI__builtin_ia32_pshufd256: 3723 case X86::BI__builtin_ia32_pshufd512: 3724 case X86::BI__builtin_ia32_pshufhw: 3725 case X86::BI__builtin_ia32_pshufhw256: 3726 case X86::BI__builtin_ia32_pshufhw512: 3727 case X86::BI__builtin_ia32_pshuflw: 3728 case X86::BI__builtin_ia32_pshuflw256: 3729 case X86::BI__builtin_ia32_pshuflw512: 3730 case X86::BI__builtin_ia32_vcvtps2ph: 3731 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3732 case X86::BI__builtin_ia32_vcvtps2ph256: 3733 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3734 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3735 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3736 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3737 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3738 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3739 case X86::BI__builtin_ia32_rndscaleps_mask: 3740 case X86::BI__builtin_ia32_rndscalepd_mask: 3741 case X86::BI__builtin_ia32_reducepd128_mask: 3742 case X86::BI__builtin_ia32_reducepd256_mask: 3743 case X86::BI__builtin_ia32_reducepd512_mask: 3744 case X86::BI__builtin_ia32_reduceps128_mask: 3745 case X86::BI__builtin_ia32_reduceps256_mask: 3746 case X86::BI__builtin_ia32_reduceps512_mask: 3747 case X86::BI__builtin_ia32_prold512: 3748 case X86::BI__builtin_ia32_prolq512: 3749 case X86::BI__builtin_ia32_prold128: 3750 case X86::BI__builtin_ia32_prold256: 3751 case X86::BI__builtin_ia32_prolq128: 3752 case X86::BI__builtin_ia32_prolq256: 3753 case X86::BI__builtin_ia32_prord512: 3754 case X86::BI__builtin_ia32_prorq512: 3755 case X86::BI__builtin_ia32_prord128: 3756 case X86::BI__builtin_ia32_prord256: 3757 case X86::BI__builtin_ia32_prorq128: 3758 case X86::BI__builtin_ia32_prorq256: 3759 case X86::BI__builtin_ia32_fpclasspd128_mask: 3760 case X86::BI__builtin_ia32_fpclasspd256_mask: 3761 case X86::BI__builtin_ia32_fpclassps128_mask: 3762 case X86::BI__builtin_ia32_fpclassps256_mask: 3763 case X86::BI__builtin_ia32_fpclassps512_mask: 3764 case X86::BI__builtin_ia32_fpclasspd512_mask: 3765 case X86::BI__builtin_ia32_fpclasssd_mask: 3766 case X86::BI__builtin_ia32_fpclassss_mask: 3767 case X86::BI__builtin_ia32_pslldqi128_byteshift: 3768 case X86::BI__builtin_ia32_pslldqi256_byteshift: 3769 case X86::BI__builtin_ia32_pslldqi512_byteshift: 3770 case X86::BI__builtin_ia32_psrldqi128_byteshift: 3771 case X86::BI__builtin_ia32_psrldqi256_byteshift: 3772 case X86::BI__builtin_ia32_psrldqi512_byteshift: 3773 case X86::BI__builtin_ia32_kshiftliqi: 3774 case X86::BI__builtin_ia32_kshiftlihi: 3775 case X86::BI__builtin_ia32_kshiftlisi: 3776 case X86::BI__builtin_ia32_kshiftlidi: 3777 case X86::BI__builtin_ia32_kshiftriqi: 3778 case X86::BI__builtin_ia32_kshiftrihi: 3779 case X86::BI__builtin_ia32_kshiftrisi: 3780 case X86::BI__builtin_ia32_kshiftridi: 3781 i = 1; l = 0; u = 255; 3782 break; 3783 case X86::BI__builtin_ia32_vperm2f128_pd256: 3784 case X86::BI__builtin_ia32_vperm2f128_ps256: 3785 case X86::BI__builtin_ia32_vperm2f128_si256: 3786 case X86::BI__builtin_ia32_permti256: 3787 case X86::BI__builtin_ia32_pblendw128: 3788 case X86::BI__builtin_ia32_pblendw256: 3789 case X86::BI__builtin_ia32_blendps256: 3790 case X86::BI__builtin_ia32_pblendd256: 3791 case X86::BI__builtin_ia32_palignr128: 3792 case X86::BI__builtin_ia32_palignr256: 3793 case X86::BI__builtin_ia32_palignr512: 3794 case X86::BI__builtin_ia32_alignq512: 3795 case X86::BI__builtin_ia32_alignd512: 3796 case 
X86::BI__builtin_ia32_alignd128: 3797 case X86::BI__builtin_ia32_alignd256: 3798 case X86::BI__builtin_ia32_alignq128: 3799 case X86::BI__builtin_ia32_alignq256: 3800 case X86::BI__builtin_ia32_vcomisd: 3801 case X86::BI__builtin_ia32_vcomiss: 3802 case X86::BI__builtin_ia32_shuf_f32x4: 3803 case X86::BI__builtin_ia32_shuf_f64x2: 3804 case X86::BI__builtin_ia32_shuf_i32x4: 3805 case X86::BI__builtin_ia32_shuf_i64x2: 3806 case X86::BI__builtin_ia32_shufpd512: 3807 case X86::BI__builtin_ia32_shufps: 3808 case X86::BI__builtin_ia32_shufps256: 3809 case X86::BI__builtin_ia32_shufps512: 3810 case X86::BI__builtin_ia32_dbpsadbw128: 3811 case X86::BI__builtin_ia32_dbpsadbw256: 3812 case X86::BI__builtin_ia32_dbpsadbw512: 3813 case X86::BI__builtin_ia32_vpshldd128: 3814 case X86::BI__builtin_ia32_vpshldd256: 3815 case X86::BI__builtin_ia32_vpshldd512: 3816 case X86::BI__builtin_ia32_vpshldq128: 3817 case X86::BI__builtin_ia32_vpshldq256: 3818 case X86::BI__builtin_ia32_vpshldq512: 3819 case X86::BI__builtin_ia32_vpshldw128: 3820 case X86::BI__builtin_ia32_vpshldw256: 3821 case X86::BI__builtin_ia32_vpshldw512: 3822 case X86::BI__builtin_ia32_vpshrdd128: 3823 case X86::BI__builtin_ia32_vpshrdd256: 3824 case X86::BI__builtin_ia32_vpshrdd512: 3825 case X86::BI__builtin_ia32_vpshrdq128: 3826 case X86::BI__builtin_ia32_vpshrdq256: 3827 case X86::BI__builtin_ia32_vpshrdq512: 3828 case X86::BI__builtin_ia32_vpshrdw128: 3829 case X86::BI__builtin_ia32_vpshrdw256: 3830 case X86::BI__builtin_ia32_vpshrdw512: 3831 i = 2; l = 0; u = 255; 3832 break; 3833 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3834 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3835 case X86::BI__builtin_ia32_fixupimmps512_mask: 3836 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3837 case X86::BI__builtin_ia32_fixupimmsd_mask: 3838 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3839 case X86::BI__builtin_ia32_fixupimmss_mask: 3840 case X86::BI__builtin_ia32_fixupimmss_maskz: 3841 case X86::BI__builtin_ia32_fixupimmpd128_mask: 3842 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 3843 case X86::BI__builtin_ia32_fixupimmpd256_mask: 3844 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 3845 case X86::BI__builtin_ia32_fixupimmps128_mask: 3846 case X86::BI__builtin_ia32_fixupimmps128_maskz: 3847 case X86::BI__builtin_ia32_fixupimmps256_mask: 3848 case X86::BI__builtin_ia32_fixupimmps256_maskz: 3849 case X86::BI__builtin_ia32_pternlogd512_mask: 3850 case X86::BI__builtin_ia32_pternlogd512_maskz: 3851 case X86::BI__builtin_ia32_pternlogq512_mask: 3852 case X86::BI__builtin_ia32_pternlogq512_maskz: 3853 case X86::BI__builtin_ia32_pternlogd128_mask: 3854 case X86::BI__builtin_ia32_pternlogd128_maskz: 3855 case X86::BI__builtin_ia32_pternlogd256_mask: 3856 case X86::BI__builtin_ia32_pternlogd256_maskz: 3857 case X86::BI__builtin_ia32_pternlogq128_mask: 3858 case X86::BI__builtin_ia32_pternlogq128_maskz: 3859 case X86::BI__builtin_ia32_pternlogq256_mask: 3860 case X86::BI__builtin_ia32_pternlogq256_maskz: 3861 i = 3; l = 0; u = 255; 3862 break; 3863 case X86::BI__builtin_ia32_gatherpfdpd: 3864 case X86::BI__builtin_ia32_gatherpfdps: 3865 case X86::BI__builtin_ia32_gatherpfqpd: 3866 case X86::BI__builtin_ia32_gatherpfqps: 3867 case X86::BI__builtin_ia32_scatterpfdpd: 3868 case X86::BI__builtin_ia32_scatterpfdps: 3869 case X86::BI__builtin_ia32_scatterpfqpd: 3870 case X86::BI__builtin_ia32_scatterpfqps: 3871 i = 4; l = 2; u = 3; 3872 break; 3873 case X86::BI__builtin_ia32_reducesd_mask: 3874 case X86::BI__builtin_ia32_reducess_mask: 3875 case 
X86::BI__builtin_ia32_rndscalesd_round_mask:
3876 case X86::BI__builtin_ia32_rndscaless_round_mask:
3877 i = 4; l = 0; u = 255;
3878 break;
3879 }
3880
3881 // Note that we don't force a hard error on the range check here, allowing
3882 // template-generated or macro-generated dead code to potentially have
3883 // out-of-range values. Such code still needs to code generate, but it doesn't
3884 // necessarily need to make any sense. We use a warning that defaults to an error.
3885 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
3886 }
3887
3888 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
3889 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
3890 /// Returns true when the format fits the function and the FormatStringInfo has
3891 /// been populated.
3892 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
3893 FormatStringInfo *FSI) {
3894 FSI->HasVAListArg = Format->getFirstArg() == 0;
3895 FSI->FormatIdx = Format->getFormatIdx() - 1;
3896 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
3897
3898 // The way the format attribute works in GCC, the implicit this argument
3899 // of member functions is counted. However, it doesn't appear in our own
3900 // lists, so decrement format_idx in that case.
3901 if (IsCXXMember) {
3902 if (FSI->FormatIdx == 0)
3903 return false;
3904 --FSI->FormatIdx;
3905 if (FSI->FirstDataArg != 0)
3906 --FSI->FirstDataArg;
3907 }
3908 return true;
3909 }
3910
3911 /// Checks if the given expression evaluates to null.
3912 ///
3913 /// Returns true if the value evaluates to null.
3914 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
3915 // If the expression has non-null type, it doesn't evaluate to null.
3916 if (auto nullability
3917 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
3918 if (*nullability == NullabilityKind::NonNull)
3919 return false;
3920 }
3921
3922 // As a special case, transparent unions initialized with zero are
3923 // considered null for the purposes of the nonnull attribute.
3924 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
3925 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
3926 if (const CompoundLiteralExpr *CLE =
3927 dyn_cast<CompoundLiteralExpr>(Expr))
3928 if (const InitListExpr *ILE =
3929 dyn_cast<InitListExpr>(CLE->getInitializer()))
3930 Expr = ILE->getInit(0);
3931 }
3932
3933 bool Result;
3934 return (!Expr->isValueDependent() &&
3935 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
3936 !Result);
3937 }
3938
3939 static void CheckNonNullArgument(Sema &S,
3940 const Expr *ArgExpr,
3941 SourceLocation CallSiteLoc) {
3942 if (CheckNonNullExpr(S, ArgExpr))
3943 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
3944 S.PDiag(diag::warn_null_arg)
3945 << ArgExpr->getSourceRange());
3946 }
3947
3948 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
3949 FormatStringInfo FSI;
3950 if ((GetFormatStringType(Format) == FST_NSString) &&
3951 getFormatStringInfo(Format, false, &FSI)) {
3952 Idx = FSI.FormatIdx;
3953 return true;
3954 }
3955 return false;
3956 }
3957
3958 /// Diagnose use of a %s directive in an NSString which is being passed
3959 /// as a formatting string to a formatting method.
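///
/// Illustrative example: for a CFString-family API the format is the third
/// argument, so a sketched call like
///   CFStringCreateWithFormat(Alloc, NULL, (CFStringRef)@"name: %s", Name);
/// is diagnosed with warn_objc_cdirective_format_string; %@ is generally the
/// intended directive for string arguments in CF/NS format strings.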
3960 static void
3961 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
3962 const NamedDecl *FDecl,
3963 Expr **Args,
3964 unsigned NumArgs) {
3965 unsigned Idx = 0;
3966 bool Format = false;
3967 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
3968 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
3969 Idx = 2;
3970 Format = true;
3971 }
3972 else
3973 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
3974 if (S.GetFormatNSStringIdx(I, Idx)) {
3975 Format = true;
3976 break;
3977 }
3978 }
3979 if (!Format || NumArgs <= Idx)
3980 return;
3981 const Expr *FormatExpr = Args[Idx];
3982 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
3983 FormatExpr = CSCE->getSubExpr();
3984 const StringLiteral *FormatString;
3985 if (const ObjCStringLiteral *OSL =
3986 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
3987 FormatString = OSL->getString();
3988 else
3989 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
3990 if (!FormatString)
3991 return;
3992 if (S.FormatStringHasSArg(FormatString)) {
3993 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
3994 << "%s" << 1 << 1;
3995 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
3996 << FDecl->getDeclName();
3997 }
3998 }
3999
4000 /// Determine whether the given type has a non-null nullability annotation.
4001 static bool isNonNullType(ASTContext &ctx, QualType type) {
4002 if (auto nullability = type->getNullability(ctx))
4003 return *nullability == NullabilityKind::NonNull;
4004
4005 return false;
4006 }
4007
4008 static void CheckNonNullArguments(Sema &S,
4009 const NamedDecl *FDecl,
4010 const FunctionProtoType *Proto,
4011 ArrayRef<const Expr *> Args,
4012 SourceLocation CallSiteLoc) {
4013 assert((FDecl || Proto) && "Need a function declaration or prototype");
4014
4015 // Already checked by the constant evaluator.
4016 if (S.isConstantEvaluated())
4017 return;
4018 // Check the attributes attached to the method/function itself.
4019 llvm::SmallBitVector NonNullArgs;
4020 if (FDecl) {
4021 // Handle the nonnull attribute on the function/method declaration itself.
4022 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
4023 if (!NonNull->args_size()) {
4024 // Easy case: all pointer arguments are nonnull.
4025 for (const auto *Arg : Args)
4026 if (S.isValidPointerAttrType(Arg->getType()))
4027 CheckNonNullArgument(S, Arg, CallSiteLoc);
4028 return;
4029 }
4030
4031 for (const ParamIdx &Idx : NonNull->args()) {
4032 unsigned IdxAST = Idx.getASTIndex();
4033 if (IdxAST >= Args.size())
4034 continue;
4035 if (NonNullArgs.empty())
4036 NonNullArgs.resize(Args.size());
4037 NonNullArgs.set(IdxAST);
4038 }
4039 }
4040 }
4041
4042 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
4043 // Handle the nonnull attribute on the parameters of the
4044 // function/method.
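    // Illustrative examples: either of these sketched parameter forms marks
    // the corresponding index in NonNullArgs below,
    //   void f(int *p __attribute__((nonnull)));   // NonNullAttr on the param
    //   void g(int * _Nonnull p);                  // nullability on the type
    // so passing a null constant for 'p' later produces warn_null_arg.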
4045 ArrayRef<ParmVarDecl*> parms; 4046 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4047 parms = FD->parameters(); 4048 else 4049 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4050 4051 unsigned ParamIndex = 0; 4052 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4053 I != E; ++I, ++ParamIndex) { 4054 const ParmVarDecl *PVD = *I; 4055 if (PVD->hasAttr<NonNullAttr>() || 4056 isNonNullType(S.Context, PVD->getType())) { 4057 if (NonNullArgs.empty()) 4058 NonNullArgs.resize(Args.size()); 4059 4060 NonNullArgs.set(ParamIndex); 4061 } 4062 } 4063 } else { 4064 // If we have a non-function, non-method declaration but no 4065 // function prototype, try to dig out the function prototype. 4066 if (!Proto) { 4067 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4068 QualType type = VD->getType().getNonReferenceType(); 4069 if (auto pointerType = type->getAs<PointerType>()) 4070 type = pointerType->getPointeeType(); 4071 else if (auto blockType = type->getAs<BlockPointerType>()) 4072 type = blockType->getPointeeType(); 4073 // FIXME: data member pointers? 4074 4075 // Dig out the function prototype, if there is one. 4076 Proto = type->getAs<FunctionProtoType>(); 4077 } 4078 } 4079 4080 // Fill in non-null argument information from the nullability 4081 // information on the parameter types (if we have them). 4082 if (Proto) { 4083 unsigned Index = 0; 4084 for (auto paramType : Proto->getParamTypes()) { 4085 if (isNonNullType(S.Context, paramType)) { 4086 if (NonNullArgs.empty()) 4087 NonNullArgs.resize(Args.size()); 4088 4089 NonNullArgs.set(Index); 4090 } 4091 4092 ++Index; 4093 } 4094 } 4095 } 4096 4097 // Check for non-null arguments. 4098 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4099 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4100 if (NonNullArgs[ArgIndex]) 4101 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4102 } 4103 } 4104 4105 /// Handles the checks for format strings, non-POD arguments to vararg 4106 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4107 /// attributes. 4108 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4109 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4110 bool IsMemberFunction, SourceLocation Loc, 4111 SourceRange Range, VariadicCallType CallType) { 4112 // FIXME: We should check as much as we can in the template definition. 4113 if (CurContext->isDependentContext()) 4114 return; 4115 4116 // Printf and scanf checking. 4117 llvm::SmallBitVector CheckedVarArgs; 4118 if (FDecl) { 4119 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4120 // Only create vector if there are format attributes. 4121 CheckedVarArgs.resize(Args.size()); 4122 4123 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4124 CheckedVarArgs); 4125 } 4126 } 4127 4128 // Refuse POD arguments that weren't caught by the format string 4129 // checks above. 4130 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4131 if (CallType != VariadicDoesNotApply && 4132 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4133 unsigned NumParams = Proto ? Proto->getNumParams() 4134 : FDecl && isa<FunctionDecl>(FDecl) 4135 ? cast<FunctionDecl>(FDecl)->getNumParams() 4136 : FDecl && isa<ObjCMethodDecl>(FDecl) 4137 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4138 : 0; 4139 4140 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4141 // Args[ArgIdx] can be null in malformed code. 
4142 if (const Expr *Arg = Args[ArgIdx]) { 4143 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4144 checkVariadicArgument(Arg, CallType); 4145 } 4146 } 4147 } 4148 4149 if (FDecl || Proto) { 4150 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4151 4152 // Type safety checking. 4153 if (FDecl) { 4154 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4155 CheckArgumentWithTypeTag(I, Args, Loc); 4156 } 4157 } 4158 4159 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4160 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4161 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4162 if (!Arg->isValueDependent()) { 4163 Expr::EvalResult Align; 4164 if (Arg->EvaluateAsInt(Align, Context)) { 4165 const llvm::APSInt &I = Align.Val.getInt(); 4166 if (!I.isPowerOf2()) 4167 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4168 << Arg->getSourceRange(); 4169 4170 if (I > Sema::MaximumAlignment) 4171 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4172 << Arg->getSourceRange() << Sema::MaximumAlignment; 4173 } 4174 } 4175 } 4176 4177 if (FD) 4178 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4179 } 4180 4181 /// CheckConstructorCall - Check a constructor call for correctness and safety 4182 /// properties not enforced by the C type system. 4183 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4184 ArrayRef<const Expr *> Args, 4185 const FunctionProtoType *Proto, 4186 SourceLocation Loc) { 4187 VariadicCallType CallType = 4188 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4189 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4190 Loc, SourceRange(), CallType); 4191 } 4192 4193 /// CheckFunctionCall - Check a direct function call for various correctness 4194 /// and safety properties not strictly enforced by the C type system. 4195 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4196 const FunctionProtoType *Proto) { 4197 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4198 isa<CXXMethodDecl>(FDecl); 4199 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4200 IsMemberOperatorCall; 4201 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4202 TheCall->getCallee()); 4203 Expr** Args = TheCall->getArgs(); 4204 unsigned NumArgs = TheCall->getNumArgs(); 4205 4206 Expr *ImplicitThis = nullptr; 4207 if (IsMemberOperatorCall) { 4208 // If this is a call to a member operator, hide the first argument 4209 // from checkCall. 4210 // FIXME: Our choice of AST representation here is less than ideal. 4211 ImplicitThis = Args[0]; 4212 ++Args; 4213 --NumArgs; 4214 } else if (IsMemberFunction) 4215 ImplicitThis = 4216 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4217 4218 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4219 IsMemberFunction, TheCall->getRParenLoc(), 4220 TheCall->getCallee()->getSourceRange(), CallType); 4221 4222 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4223 // None of the checks below are needed for functions that don't have 4224 // simple names (e.g., C++ conversion functions). 
4225 if (!FnInfo) 4226 return false; 4227 4228 CheckAbsoluteValueFunction(TheCall, FDecl); 4229 CheckMaxUnsignedZero(TheCall, FDecl); 4230 4231 if (getLangOpts().ObjC) 4232 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4233 4234 unsigned CMId = FDecl->getMemoryFunctionKind(); 4235 if (CMId == 0) 4236 return false; 4237 4238 // Handle memory setting and copying functions. 4239 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4240 CheckStrlcpycatArguments(TheCall, FnInfo); 4241 else if (CMId == Builtin::BIstrncat) 4242 CheckStrncatArguments(TheCall, FnInfo); 4243 else 4244 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4245 4246 return false; 4247 } 4248 4249 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4250 ArrayRef<const Expr *> Args) { 4251 VariadicCallType CallType = 4252 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4253 4254 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4255 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4256 CallType); 4257 4258 return false; 4259 } 4260 4261 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4262 const FunctionProtoType *Proto) { 4263 QualType Ty; 4264 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4265 Ty = V->getType().getNonReferenceType(); 4266 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4267 Ty = F->getType().getNonReferenceType(); 4268 else 4269 return false; 4270 4271 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4272 !Ty->isFunctionProtoType()) 4273 return false; 4274 4275 VariadicCallType CallType; 4276 if (!Proto || !Proto->isVariadic()) { 4277 CallType = VariadicDoesNotApply; 4278 } else if (Ty->isBlockPointerType()) { 4279 CallType = VariadicBlock; 4280 } else { // Ty->isFunctionPointerType() 4281 CallType = VariadicFunction; 4282 } 4283 4284 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4285 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4286 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4287 TheCall->getCallee()->getSourceRange(), CallType); 4288 4289 return false; 4290 } 4291 4292 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4293 /// such as function pointers returned from functions. 
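///
/// Illustrative example: in a sketched call through a returned function
/// pointer such as
///   GetHandler()(Fmt, Arg);
/// there is no declaration to inspect, so only the prototype-driven checks
/// (non-trivial/variadic argument checks and parameter nullability) run here.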
4294 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4295 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4296 TheCall->getCallee()); 4297 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4298 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4299 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4300 TheCall->getCallee()->getSourceRange(), CallType); 4301 4302 return false; 4303 } 4304 4305 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4306 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4307 return false; 4308 4309 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4310 switch (Op) { 4311 case AtomicExpr::AO__c11_atomic_init: 4312 case AtomicExpr::AO__opencl_atomic_init: 4313 llvm_unreachable("There is no ordering argument for an init"); 4314 4315 case AtomicExpr::AO__c11_atomic_load: 4316 case AtomicExpr::AO__opencl_atomic_load: 4317 case AtomicExpr::AO__atomic_load_n: 4318 case AtomicExpr::AO__atomic_load: 4319 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4320 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4321 4322 case AtomicExpr::AO__c11_atomic_store: 4323 case AtomicExpr::AO__opencl_atomic_store: 4324 case AtomicExpr::AO__atomic_store: 4325 case AtomicExpr::AO__atomic_store_n: 4326 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4327 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4328 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4329 4330 default: 4331 return true; 4332 } 4333 } 4334 4335 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4336 AtomicExpr::AtomicOp Op) { 4337 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4338 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4339 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4340 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4341 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4342 Op); 4343 } 4344 4345 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4346 SourceLocation RParenLoc, MultiExprArg Args, 4347 AtomicExpr::AtomicOp Op, 4348 AtomicArgumentOrder ArgOrder) { 4349 // All the non-OpenCL operations take one of the following forms. 4350 // The OpenCL operations take the __c11 forms with one extra argument for 4351 // synchronization scope. 
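  // Illustrative shape (sketched):
  //   __c11_atomic_load(Ptr, Order);             // C11 / GNU style
  //   __opencl_atomic_load(Ptr, Order, Scope);   // same form plus a scope
  // which is why AdjustedNumArgs is incremented for the OpenCL operations
  // further down.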
4352 enum { 4353 // C __c11_atomic_init(A *, C) 4354 Init, 4355 4356 // C __c11_atomic_load(A *, int) 4357 Load, 4358 4359 // void __atomic_load(A *, CP, int) 4360 LoadCopy, 4361 4362 // void __atomic_store(A *, CP, int) 4363 Copy, 4364 4365 // C __c11_atomic_add(A *, M, int) 4366 Arithmetic, 4367 4368 // C __atomic_exchange_n(A *, CP, int) 4369 Xchg, 4370 4371 // void __atomic_exchange(A *, C *, CP, int) 4372 GNUXchg, 4373 4374 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4375 C11CmpXchg, 4376 4377 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4378 GNUCmpXchg 4379 } Form = Init; 4380 4381 const unsigned NumForm = GNUCmpXchg + 1; 4382 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4383 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4384 // where: 4385 // C is an appropriate type, 4386 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4387 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4388 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4389 // the int parameters are for orderings. 4390 4391 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4392 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4393 "need to update code for modified forms"); 4394 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4395 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 4396 AtomicExpr::AO__atomic_load, 4397 "need to update code for modified C11 atomics"); 4398 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4399 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4400 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4401 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 4402 IsOpenCL; 4403 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4404 Op == AtomicExpr::AO__atomic_store_n || 4405 Op == AtomicExpr::AO__atomic_exchange_n || 4406 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4407 bool IsAddSub = false; 4408 4409 switch (Op) { 4410 case AtomicExpr::AO__c11_atomic_init: 4411 case AtomicExpr::AO__opencl_atomic_init: 4412 Form = Init; 4413 break; 4414 4415 case AtomicExpr::AO__c11_atomic_load: 4416 case AtomicExpr::AO__opencl_atomic_load: 4417 case AtomicExpr::AO__atomic_load_n: 4418 Form = Load; 4419 break; 4420 4421 case AtomicExpr::AO__atomic_load: 4422 Form = LoadCopy; 4423 break; 4424 4425 case AtomicExpr::AO__c11_atomic_store: 4426 case AtomicExpr::AO__opencl_atomic_store: 4427 case AtomicExpr::AO__atomic_store: 4428 case AtomicExpr::AO__atomic_store_n: 4429 Form = Copy; 4430 break; 4431 4432 case AtomicExpr::AO__c11_atomic_fetch_add: 4433 case AtomicExpr::AO__c11_atomic_fetch_sub: 4434 case AtomicExpr::AO__opencl_atomic_fetch_add: 4435 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4436 case AtomicExpr::AO__atomic_fetch_add: 4437 case AtomicExpr::AO__atomic_fetch_sub: 4438 case AtomicExpr::AO__atomic_add_fetch: 4439 case AtomicExpr::AO__atomic_sub_fetch: 4440 IsAddSub = true; 4441 LLVM_FALLTHROUGH; 4442 case AtomicExpr::AO__c11_atomic_fetch_and: 4443 case AtomicExpr::AO__c11_atomic_fetch_or: 4444 case AtomicExpr::AO__c11_atomic_fetch_xor: 4445 case AtomicExpr::AO__opencl_atomic_fetch_and: 4446 case AtomicExpr::AO__opencl_atomic_fetch_or: 4447 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4448 case AtomicExpr::AO__atomic_fetch_and: 4449 case AtomicExpr::AO__atomic_fetch_or: 4450 case AtomicExpr::AO__atomic_fetch_xor: 4451 case AtomicExpr::AO__atomic_fetch_nand: 4452 case AtomicExpr::AO__atomic_and_fetch: 4453 case AtomicExpr::AO__atomic_or_fetch: 4454 
case AtomicExpr::AO__atomic_xor_fetch: 4455 case AtomicExpr::AO__atomic_nand_fetch: 4456 case AtomicExpr::AO__c11_atomic_fetch_min: 4457 case AtomicExpr::AO__c11_atomic_fetch_max: 4458 case AtomicExpr::AO__opencl_atomic_fetch_min: 4459 case AtomicExpr::AO__opencl_atomic_fetch_max: 4460 case AtomicExpr::AO__atomic_min_fetch: 4461 case AtomicExpr::AO__atomic_max_fetch: 4462 case AtomicExpr::AO__atomic_fetch_min: 4463 case AtomicExpr::AO__atomic_fetch_max: 4464 Form = Arithmetic; 4465 break; 4466 4467 case AtomicExpr::AO__c11_atomic_exchange: 4468 case AtomicExpr::AO__opencl_atomic_exchange: 4469 case AtomicExpr::AO__atomic_exchange_n: 4470 Form = Xchg; 4471 break; 4472 4473 case AtomicExpr::AO__atomic_exchange: 4474 Form = GNUXchg; 4475 break; 4476 4477 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4478 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4479 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4480 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4481 Form = C11CmpXchg; 4482 break; 4483 4484 case AtomicExpr::AO__atomic_compare_exchange: 4485 case AtomicExpr::AO__atomic_compare_exchange_n: 4486 Form = GNUCmpXchg; 4487 break; 4488 } 4489 4490 unsigned AdjustedNumArgs = NumArgs[Form]; 4491 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4492 ++AdjustedNumArgs; 4493 // Check we have the right number of arguments. 4494 if (Args.size() < AdjustedNumArgs) { 4495 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 4496 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4497 << ExprRange; 4498 return ExprError(); 4499 } else if (Args.size() > AdjustedNumArgs) { 4500 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 4501 diag::err_typecheck_call_too_many_args) 4502 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4503 << ExprRange; 4504 return ExprError(); 4505 } 4506 4507 // Inspect the first argument of the atomic operation. 4508 Expr *Ptr = Args[0]; 4509 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4510 if (ConvertedPtr.isInvalid()) 4511 return ExprError(); 4512 4513 Ptr = ConvertedPtr.get(); 4514 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4515 if (!pointerType) { 4516 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 4517 << Ptr->getType() << Ptr->getSourceRange(); 4518 return ExprError(); 4519 } 4520 4521 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4522 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4523 QualType ValType = AtomTy; // 'C' 4524 if (IsC11) { 4525 if (!AtomTy->isAtomicType()) { 4526 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 4527 << Ptr->getType() << Ptr->getSourceRange(); 4528 return ExprError(); 4529 } 4530 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4531 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4532 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 4533 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4534 << Ptr->getSourceRange(); 4535 return ExprError(); 4536 } 4537 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 4538 } else if (Form != Load && Form != LoadCopy) { 4539 if (ValType.isConstQualified()) { 4540 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 4541 << Ptr->getType() << Ptr->getSourceRange(); 4542 return ExprError(); 4543 } 4544 } 4545 4546 // For an arithmetic operation, the implied arithmetic must be well-formed. 
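// For example (illustrative): __c11_atomic_fetch_add(&AtomicInt, 1, Order) is
// accepted for atomic integer and pointer operands, whereas the
// fetch_and/or/xor/nand/min/max forms are rejected below unless the value
// type is an integer.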
4547 if (Form == Arithmetic) { 4548 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4549 if (IsAddSub && !ValType->isIntegerType() 4550 && !ValType->isPointerType()) { 4551 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4552 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4553 return ExprError(); 4554 } 4555 if (!IsAddSub && !ValType->isIntegerType()) { 4556 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 4557 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4558 return ExprError(); 4559 } 4560 if (IsC11 && ValType->isPointerType() && 4561 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4562 diag::err_incomplete_type)) { 4563 return ExprError(); 4564 } 4565 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4566 // For __atomic_*_n operations, the value type must be a scalar integral or 4567 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4568 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4569 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4570 return ExprError(); 4571 } 4572 4573 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4574 !AtomTy->isScalarType()) { 4575 // For GNU atomics, require a trivially-copyable type. This is not part of 4576 // the GNU atomics specification, but we enforce it for sanity. 4577 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 4578 << Ptr->getType() << Ptr->getSourceRange(); 4579 return ExprError(); 4580 } 4581 4582 switch (ValType.getObjCLifetime()) { 4583 case Qualifiers::OCL_None: 4584 case Qualifiers::OCL_ExplicitNone: 4585 // okay 4586 break; 4587 4588 case Qualifiers::OCL_Weak: 4589 case Qualifiers::OCL_Strong: 4590 case Qualifiers::OCL_Autoreleasing: 4591 // FIXME: Can this happen? By this point, ValType should be known 4592 // to be trivially copyable. 4593 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 4594 << ValType << Ptr->getSourceRange(); 4595 return ExprError(); 4596 } 4597 4598 // All atomic operations have an overload which takes a pointer to a volatile 4599 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4600 // into the result or the other operands. Similarly atomic_load takes a 4601 // pointer to a const 'A'. 4602 ValType.removeLocalVolatile(); 4603 ValType.removeLocalConst(); 4604 QualType ResultType = ValType; 4605 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4606 Form == Init) 4607 ResultType = Context.VoidTy; 4608 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4609 ResultType = Context.BoolTy; 4610 4611 // The type of a parameter passed 'by value'. In the GNU atomics, such 4612 // arguments are actually passed as pointers. 
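// For example (illustrative): __atomic_store_n(&Obj, Val, Order) takes Val by
// value, while the non-_n form __atomic_store(&Obj, &Val, Order) takes a
// pointer to it; ByValType below captures that difference.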
4613 QualType ByValType = ValType; // 'CP' 4614 bool IsPassedByAddress = false; 4615 if (!IsC11 && !IsN) { 4616 ByValType = Ptr->getType(); 4617 IsPassedByAddress = true; 4618 } 4619 4620 SmallVector<Expr *, 5> APIOrderedArgs; 4621 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 4622 APIOrderedArgs.push_back(Args[0]); 4623 switch (Form) { 4624 case Init: 4625 case Load: 4626 APIOrderedArgs.push_back(Args[1]); // Val1/Order 4627 break; 4628 case LoadCopy: 4629 case Copy: 4630 case Arithmetic: 4631 case Xchg: 4632 APIOrderedArgs.push_back(Args[2]); // Val1 4633 APIOrderedArgs.push_back(Args[1]); // Order 4634 break; 4635 case GNUXchg: 4636 APIOrderedArgs.push_back(Args[2]); // Val1 4637 APIOrderedArgs.push_back(Args[3]); // Val2 4638 APIOrderedArgs.push_back(Args[1]); // Order 4639 break; 4640 case C11CmpXchg: 4641 APIOrderedArgs.push_back(Args[2]); // Val1 4642 APIOrderedArgs.push_back(Args[4]); // Val2 4643 APIOrderedArgs.push_back(Args[1]); // Order 4644 APIOrderedArgs.push_back(Args[3]); // OrderFail 4645 break; 4646 case GNUCmpXchg: 4647 APIOrderedArgs.push_back(Args[2]); // Val1 4648 APIOrderedArgs.push_back(Args[4]); // Val2 4649 APIOrderedArgs.push_back(Args[5]); // Weak 4650 APIOrderedArgs.push_back(Args[1]); // Order 4651 APIOrderedArgs.push_back(Args[3]); // OrderFail 4652 break; 4653 } 4654 } else 4655 APIOrderedArgs.append(Args.begin(), Args.end()); 4656 4657 // The first argument's non-CV pointer type is used to deduce the type of 4658 // subsequent arguments, except for: 4659 // - weak flag (always converted to bool) 4660 // - memory order (always converted to int) 4661 // - scope (always converted to int) 4662 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 4663 QualType Ty; 4664 if (i < NumVals[Form] + 1) { 4665 switch (i) { 4666 case 0: 4667 // The first argument is always a pointer. It has a fixed type. 4668 // It is always dereferenced, a nullptr is undefined. 4669 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4670 // Nothing else to do: we already know all we want about this pointer. 4671 continue; 4672 case 1: 4673 // The second argument is the non-atomic operand. For arithmetic, this 4674 // is always passed by value, and for a compare_exchange it is always 4675 // passed by address. For the rest, GNU uses by-address and C11 uses 4676 // by-value. 4677 assert(Form != Load); 4678 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4679 Ty = ValType; 4680 else if (Form == Copy || Form == Xchg) { 4681 if (IsPassedByAddress) { 4682 // The value pointer is always dereferenced, a nullptr is undefined. 4683 CheckNonNullArgument(*this, APIOrderedArgs[i], 4684 ExprRange.getBegin()); 4685 } 4686 Ty = ByValType; 4687 } else if (Form == Arithmetic) 4688 Ty = Context.getPointerDiffType(); 4689 else { 4690 Expr *ValArg = APIOrderedArgs[i]; 4691 // The value pointer is always dereferenced, a nullptr is undefined. 4692 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 4693 LangAS AS = LangAS::Default; 4694 // Keep address space of non-atomic pointer type. 4695 if (const PointerType *PtrTy = 4696 ValArg->getType()->getAs<PointerType>()) { 4697 AS = PtrTy->getPointeeType().getAddressSpace(); 4698 } 4699 Ty = Context.getPointerType( 4700 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4701 } 4702 break; 4703 case 2: 4704 // The third argument to compare_exchange / GNU exchange is the desired 4705 // value, either by-value (for the C11 and *_n variant) or as a pointer. 
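// For example (illustrative): __atomic_compare_exchange_n passes the desired
// value directly, while __atomic_compare_exchange passes a pointer to it.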
4706 if (IsPassedByAddress) 4707 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4708 Ty = ByValType; 4709 break; 4710 case 3: 4711 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4712 Ty = Context.BoolTy; 4713 break; 4714 } 4715 } else { 4716 // The order(s) and scope are always converted to int. 4717 Ty = Context.IntTy; 4718 } 4719 4720 InitializedEntity Entity = 4721 InitializedEntity::InitializeParameter(Context, Ty, false); 4722 ExprResult Arg = APIOrderedArgs[i]; 4723 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4724 if (Arg.isInvalid()) 4725 return true; 4726 APIOrderedArgs[i] = Arg.get(); 4727 } 4728 4729 // Permute the arguments into a 'consistent' order. 4730 SmallVector<Expr*, 5> SubExprs; 4731 SubExprs.push_back(Ptr); 4732 switch (Form) { 4733 case Init: 4734 // Note, AtomicExpr::getVal1() has a special case for this atomic. 4735 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4736 break; 4737 case Load: 4738 SubExprs.push_back(APIOrderedArgs[1]); // Order 4739 break; 4740 case LoadCopy: 4741 case Copy: 4742 case Arithmetic: 4743 case Xchg: 4744 SubExprs.push_back(APIOrderedArgs[2]); // Order 4745 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4746 break; 4747 case GNUXchg: 4748 // Note, AtomicExpr::getVal2() has a special case for this atomic. 4749 SubExprs.push_back(APIOrderedArgs[3]); // Order 4750 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4751 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4752 break; 4753 case C11CmpXchg: 4754 SubExprs.push_back(APIOrderedArgs[3]); // Order 4755 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4756 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 4757 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4758 break; 4759 case GNUCmpXchg: 4760 SubExprs.push_back(APIOrderedArgs[4]); // Order 4761 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4762 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 4763 SubExprs.push_back(APIOrderedArgs[2]); // Val2 4764 SubExprs.push_back(APIOrderedArgs[3]); // Weak 4765 break; 4766 } 4767 4768 if (SubExprs.size() >= 2 && Form != Init) { 4769 llvm::APSInt Result(32); 4770 if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && 4771 !isValidOrderingForOp(Result.getSExtValue(), Op)) 4772 Diag(SubExprs[1]->getBeginLoc(), 4773 diag::warn_atomic_op_has_invalid_memory_order) 4774 << SubExprs[1]->getSourceRange(); 4775 } 4776 4777 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 4778 auto *Scope = Args[Args.size() - 1]; 4779 llvm::APSInt Result(32); 4780 if (Scope->isIntegerConstantExpr(Result, Context) && 4781 !ScopeModel->isValid(Result.getZExtValue())) { 4782 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 4783 << Scope->getSourceRange(); 4784 } 4785 SubExprs.push_back(Scope); 4786 } 4787 4788 AtomicExpr *AE = new (Context) 4789 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 4790 4791 if ((Op == AtomicExpr::AO__c11_atomic_load || 4792 Op == AtomicExpr::AO__c11_atomic_store || 4793 Op == AtomicExpr::AO__opencl_atomic_load || 4794 Op == AtomicExpr::AO__opencl_atomic_store ) && 4795 Context.AtomicUsesUnsupportedLibcall(AE)) 4796 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 4797 << ((Op == AtomicExpr::AO__c11_atomic_load || 4798 Op == AtomicExpr::AO__opencl_atomic_load) 4799 ? 0 4800 : 1); 4801 4802 return AE; 4803 } 4804 4805 /// checkBuiltinArgument - Given a call to a builtin function, perform 4806 /// normal type-checking on the given argument, updating the call in 4807 /// place. 
This is useful when a builtin function requires custom 4808 /// type-checking for some of its arguments but not necessarily all of 4809 /// them. 4810 /// 4811 /// Returns true on error. 4812 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 4813 FunctionDecl *Fn = E->getDirectCallee(); 4814 assert(Fn && "builtin call without direct callee!"); 4815 4816 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 4817 InitializedEntity Entity = 4818 InitializedEntity::InitializeParameter(S.Context, Param); 4819 4820 ExprResult Arg = E->getArg(0); 4821 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 4822 if (Arg.isInvalid()) 4823 return true; 4824 4825 E->setArg(ArgIndex, Arg.get()); 4826 return false; 4827 } 4828 4829 /// We have a call to a function like __sync_fetch_and_add, which is an 4830 /// overloaded function based on the pointer type of its first argument. 4831 /// The main BuildCallExpr routines have already promoted the types of 4832 /// arguments because all of these calls are prototyped as void(...). 4833 /// 4834 /// This function goes through and does final semantic checking for these 4835 /// builtins, as well as generating any warnings. 4836 ExprResult 4837 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 4838 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 4839 Expr *Callee = TheCall->getCallee(); 4840 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 4841 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 4842 4843 // Ensure that we have at least one argument to do type inference from. 4844 if (TheCall->getNumArgs() < 1) { 4845 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 4846 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 4847 return ExprError(); 4848 } 4849 4850 // Inspect the first argument of the atomic builtin. This should always be 4851 // a pointer type, whose element is an integral scalar or pointer type. 4852 // Because it is a pointer type, we don't have to worry about any implicit 4853 // casts here. 4854 // FIXME: We don't allow floating point scalars as input. 
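// For example (illustrative): __sync_fetch_and_add(&Counter, 1) is accepted
// when Counter is an integer (or pointer) object, but a first argument of
// type float * is rejected by the checks below.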
4855 Expr *FirstArg = TheCall->getArg(0);
4856 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
4857 if (FirstArgResult.isInvalid())
4858 return ExprError();
4859 FirstArg = FirstArgResult.get();
4860 TheCall->setArg(0, FirstArg);
4861
4862 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
4863 if (!pointerType) {
4864 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
4865 << FirstArg->getType() << FirstArg->getSourceRange();
4866 return ExprError();
4867 }
4868
4869 QualType ValType = pointerType->getPointeeType();
4870 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
4871 !ValType->isBlockPointerType()) {
4872 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
4873 << FirstArg->getType() << FirstArg->getSourceRange();
4874 return ExprError();
4875 }
4876
4877 if (ValType.isConstQualified()) {
4878 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
4879 << FirstArg->getType() << FirstArg->getSourceRange();
4880 return ExprError();
4881 }
4882
4883 switch (ValType.getObjCLifetime()) {
4884 case Qualifiers::OCL_None:
4885 case Qualifiers::OCL_ExplicitNone:
4886 // okay
4887 break;
4888
4889 case Qualifiers::OCL_Weak:
4890 case Qualifiers::OCL_Strong:
4891 case Qualifiers::OCL_Autoreleasing:
4892 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
4893 << ValType << FirstArg->getSourceRange();
4894 return ExprError();
4895 }
4896
4897 // Strip any qualifiers off ValType.
4898 ValType = ValType.getUnqualifiedType();
4899
4900 // The majority of builtins return a value, but a few have special return
4901 // types, so allow them to override appropriately below.
4902 QualType ResultType = ValType;
4903
4904 // We need to figure out which concrete builtin this maps onto. For example,
4905 // __sync_fetch_and_add with a 2 byte object turns into
4906 // __sync_fetch_and_add_2.
4907 #define BUILTIN_ROW(x) \
4908 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
4909 Builtin::BI##x##_8, Builtin::BI##x##_16 }
4910
4911 static const unsigned BuiltinIndices[][5] = {
4912 BUILTIN_ROW(__sync_fetch_and_add),
4913 BUILTIN_ROW(__sync_fetch_and_sub),
4914 BUILTIN_ROW(__sync_fetch_and_or),
4915 BUILTIN_ROW(__sync_fetch_and_and),
4916 BUILTIN_ROW(__sync_fetch_and_xor),
4917 BUILTIN_ROW(__sync_fetch_and_nand),
4918
4919 BUILTIN_ROW(__sync_add_and_fetch),
4920 BUILTIN_ROW(__sync_sub_and_fetch),
4921 BUILTIN_ROW(__sync_and_and_fetch),
4922 BUILTIN_ROW(__sync_or_and_fetch),
4923 BUILTIN_ROW(__sync_xor_and_fetch),
4924 BUILTIN_ROW(__sync_nand_and_fetch),
4925
4926 BUILTIN_ROW(__sync_val_compare_and_swap),
4927 BUILTIN_ROW(__sync_bool_compare_and_swap),
4928 BUILTIN_ROW(__sync_lock_test_and_set),
4929 BUILTIN_ROW(__sync_lock_release),
4930 BUILTIN_ROW(__sync_swap)
4931 };
4932 #undef BUILTIN_ROW
4933
4934 // Determine the index of the size.
4935 unsigned SizeIndex;
4936 switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
4937 case 1: SizeIndex = 0; break;
4938 case 2: SizeIndex = 1; break;
4939 case 4: SizeIndex = 2; break;
4940 case 8: SizeIndex = 3; break;
4941 case 16: SizeIndex = 4; break;
4942 default:
4943 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
4944 << FirstArg->getType() << FirstArg->getSourceRange();
4945 return ExprError();
4946 }
4947
4948 // Each of these builtins has one pointer argument, followed by some number of
4949 // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
4950 // that we ignore.
Find out which row of BuiltinIndices to read from as well 4951 // as the number of fixed args. 4952 unsigned BuiltinID = FDecl->getBuiltinID(); 4953 unsigned BuiltinIndex, NumFixed = 1; 4954 bool WarnAboutSemanticsChange = false; 4955 switch (BuiltinID) { 4956 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 4957 case Builtin::BI__sync_fetch_and_add: 4958 case Builtin::BI__sync_fetch_and_add_1: 4959 case Builtin::BI__sync_fetch_and_add_2: 4960 case Builtin::BI__sync_fetch_and_add_4: 4961 case Builtin::BI__sync_fetch_and_add_8: 4962 case Builtin::BI__sync_fetch_and_add_16: 4963 BuiltinIndex = 0; 4964 break; 4965 4966 case Builtin::BI__sync_fetch_and_sub: 4967 case Builtin::BI__sync_fetch_and_sub_1: 4968 case Builtin::BI__sync_fetch_and_sub_2: 4969 case Builtin::BI__sync_fetch_and_sub_4: 4970 case Builtin::BI__sync_fetch_and_sub_8: 4971 case Builtin::BI__sync_fetch_and_sub_16: 4972 BuiltinIndex = 1; 4973 break; 4974 4975 case Builtin::BI__sync_fetch_and_or: 4976 case Builtin::BI__sync_fetch_and_or_1: 4977 case Builtin::BI__sync_fetch_and_or_2: 4978 case Builtin::BI__sync_fetch_and_or_4: 4979 case Builtin::BI__sync_fetch_and_or_8: 4980 case Builtin::BI__sync_fetch_and_or_16: 4981 BuiltinIndex = 2; 4982 break; 4983 4984 case Builtin::BI__sync_fetch_and_and: 4985 case Builtin::BI__sync_fetch_and_and_1: 4986 case Builtin::BI__sync_fetch_and_and_2: 4987 case Builtin::BI__sync_fetch_and_and_4: 4988 case Builtin::BI__sync_fetch_and_and_8: 4989 case Builtin::BI__sync_fetch_and_and_16: 4990 BuiltinIndex = 3; 4991 break; 4992 4993 case Builtin::BI__sync_fetch_and_xor: 4994 case Builtin::BI__sync_fetch_and_xor_1: 4995 case Builtin::BI__sync_fetch_and_xor_2: 4996 case Builtin::BI__sync_fetch_and_xor_4: 4997 case Builtin::BI__sync_fetch_and_xor_8: 4998 case Builtin::BI__sync_fetch_and_xor_16: 4999 BuiltinIndex = 4; 5000 break; 5001 5002 case Builtin::BI__sync_fetch_and_nand: 5003 case Builtin::BI__sync_fetch_and_nand_1: 5004 case Builtin::BI__sync_fetch_and_nand_2: 5005 case Builtin::BI__sync_fetch_and_nand_4: 5006 case Builtin::BI__sync_fetch_and_nand_8: 5007 case Builtin::BI__sync_fetch_and_nand_16: 5008 BuiltinIndex = 5; 5009 WarnAboutSemanticsChange = true; 5010 break; 5011 5012 case Builtin::BI__sync_add_and_fetch: 5013 case Builtin::BI__sync_add_and_fetch_1: 5014 case Builtin::BI__sync_add_and_fetch_2: 5015 case Builtin::BI__sync_add_and_fetch_4: 5016 case Builtin::BI__sync_add_and_fetch_8: 5017 case Builtin::BI__sync_add_and_fetch_16: 5018 BuiltinIndex = 6; 5019 break; 5020 5021 case Builtin::BI__sync_sub_and_fetch: 5022 case Builtin::BI__sync_sub_and_fetch_1: 5023 case Builtin::BI__sync_sub_and_fetch_2: 5024 case Builtin::BI__sync_sub_and_fetch_4: 5025 case Builtin::BI__sync_sub_and_fetch_8: 5026 case Builtin::BI__sync_sub_and_fetch_16: 5027 BuiltinIndex = 7; 5028 break; 5029 5030 case Builtin::BI__sync_and_and_fetch: 5031 case Builtin::BI__sync_and_and_fetch_1: 5032 case Builtin::BI__sync_and_and_fetch_2: 5033 case Builtin::BI__sync_and_and_fetch_4: 5034 case Builtin::BI__sync_and_and_fetch_8: 5035 case Builtin::BI__sync_and_and_fetch_16: 5036 BuiltinIndex = 8; 5037 break; 5038 5039 case Builtin::BI__sync_or_and_fetch: 5040 case Builtin::BI__sync_or_and_fetch_1: 5041 case Builtin::BI__sync_or_and_fetch_2: 5042 case Builtin::BI__sync_or_and_fetch_4: 5043 case Builtin::BI__sync_or_and_fetch_8: 5044 case Builtin::BI__sync_or_and_fetch_16: 5045 BuiltinIndex = 9; 5046 break; 5047 5048 case Builtin::BI__sync_xor_and_fetch: 5049 case Builtin::BI__sync_xor_and_fetch_1: 5050 case 
Builtin::BI__sync_xor_and_fetch_2: 5051 case Builtin::BI__sync_xor_and_fetch_4: 5052 case Builtin::BI__sync_xor_and_fetch_8: 5053 case Builtin::BI__sync_xor_and_fetch_16: 5054 BuiltinIndex = 10; 5055 break; 5056 5057 case Builtin::BI__sync_nand_and_fetch: 5058 case Builtin::BI__sync_nand_and_fetch_1: 5059 case Builtin::BI__sync_nand_and_fetch_2: 5060 case Builtin::BI__sync_nand_and_fetch_4: 5061 case Builtin::BI__sync_nand_and_fetch_8: 5062 case Builtin::BI__sync_nand_and_fetch_16: 5063 BuiltinIndex = 11; 5064 WarnAboutSemanticsChange = true; 5065 break; 5066 5067 case Builtin::BI__sync_val_compare_and_swap: 5068 case Builtin::BI__sync_val_compare_and_swap_1: 5069 case Builtin::BI__sync_val_compare_and_swap_2: 5070 case Builtin::BI__sync_val_compare_and_swap_4: 5071 case Builtin::BI__sync_val_compare_and_swap_8: 5072 case Builtin::BI__sync_val_compare_and_swap_16: 5073 BuiltinIndex = 12; 5074 NumFixed = 2; 5075 break; 5076 5077 case Builtin::BI__sync_bool_compare_and_swap: 5078 case Builtin::BI__sync_bool_compare_and_swap_1: 5079 case Builtin::BI__sync_bool_compare_and_swap_2: 5080 case Builtin::BI__sync_bool_compare_and_swap_4: 5081 case Builtin::BI__sync_bool_compare_and_swap_8: 5082 case Builtin::BI__sync_bool_compare_and_swap_16: 5083 BuiltinIndex = 13; 5084 NumFixed = 2; 5085 ResultType = Context.BoolTy; 5086 break; 5087 5088 case Builtin::BI__sync_lock_test_and_set: 5089 case Builtin::BI__sync_lock_test_and_set_1: 5090 case Builtin::BI__sync_lock_test_and_set_2: 5091 case Builtin::BI__sync_lock_test_and_set_4: 5092 case Builtin::BI__sync_lock_test_and_set_8: 5093 case Builtin::BI__sync_lock_test_and_set_16: 5094 BuiltinIndex = 14; 5095 break; 5096 5097 case Builtin::BI__sync_lock_release: 5098 case Builtin::BI__sync_lock_release_1: 5099 case Builtin::BI__sync_lock_release_2: 5100 case Builtin::BI__sync_lock_release_4: 5101 case Builtin::BI__sync_lock_release_8: 5102 case Builtin::BI__sync_lock_release_16: 5103 BuiltinIndex = 15; 5104 NumFixed = 0; 5105 ResultType = Context.VoidTy; 5106 break; 5107 5108 case Builtin::BI__sync_swap: 5109 case Builtin::BI__sync_swap_1: 5110 case Builtin::BI__sync_swap_2: 5111 case Builtin::BI__sync_swap_4: 5112 case Builtin::BI__sync_swap_8: 5113 case Builtin::BI__sync_swap_16: 5114 BuiltinIndex = 16; 5115 break; 5116 } 5117 5118 // Now that we know how many fixed arguments we expect, first check that we 5119 // have at least that many. 5120 if (TheCall->getNumArgs() < 1+NumFixed) { 5121 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5122 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5123 << Callee->getSourceRange(); 5124 return ExprError(); 5125 } 5126 5127 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5128 << Callee->getSourceRange(); 5129 5130 if (WarnAboutSemanticsChange) { 5131 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5132 << Callee->getSourceRange(); 5133 } 5134 5135 // Get the decl for the concrete builtin from this, we can tell what the 5136 // concrete integer type we should convert to is. 5137 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5138 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5139 FunctionDecl *NewBuiltinDecl; 5140 if (NewBuiltinID == BuiltinID) 5141 NewBuiltinDecl = FDecl; 5142 else { 5143 // Perform builtin lookup to avoid redeclaring it. 
5144 DeclarationName DN(&Context.Idents.get(NewBuiltinName));
5145 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
5146 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
5147 assert(Res.getFoundDecl());
5148 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
5149 if (!NewBuiltinDecl)
5150 return ExprError();
5151 }
5152
5153 // The first argument --- the pointer --- has a fixed type; we
5154 // deduce the types of the rest of the arguments accordingly. Walk
5155 // the remaining arguments, converting them to the deduced value type.
5156 for (unsigned i = 0; i != NumFixed; ++i) {
5157 ExprResult Arg = TheCall->getArg(i+1);
5158
5159 // GCC does an implicit conversion to the pointer or integer ValType. This
5160 // can fail in some cases (1i -> int**), check for this error case now.
5161 // Initialize the argument.
5162 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
5163 ValType, /*consume*/ false);
5164 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
5165 if (Arg.isInvalid())
5166 return ExprError();
5167
5168 // Okay, we have something that *can* be converted to the right type. Check
5169 // to see if there is a potentially weird extension going on here. This can
5170 // happen when you do an atomic operation on something like a char* and
5171 // pass in 42. The 42 gets converted to char. This is even more strange
5172 // for things like 45.123 -> char, etc.
5173 // FIXME: Do this check.
5174 TheCall->setArg(i+1, Arg.get());
5175 }
5176
5177 // Create a new DeclRefExpr to refer to the new decl.
5178 DeclRefExpr *NewDRE = DeclRefExpr::Create(
5179 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
5180 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
5181 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
5182
5183 // Set the callee in the CallExpr.
5184 // FIXME: This loses syntactic information.
5185 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
5186 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
5187 CK_BuiltinFnToFnPtr);
5188 TheCall->setCallee(PromotedCall.get());
5189
5190 // Change the result type of the call to match the original value type. This
5191 // is arbitrary, but the codegen for these builtins is designed to handle it
5192 // gracefully.
5193 TheCall->setType(ResultType);
5194
5195 return TheCallResult;
5196 }
5197
5198 /// SemaBuiltinNontemporalOverloaded - We have a call to
5199 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
5200 /// overloaded function based on the pointer type of its last argument.
5201 ///
5202 /// This function goes through and does final semantic checking for these
5203 /// builtins.
5204 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
5205 CallExpr *TheCall = (CallExpr *)TheCallResult.get();
5206 DeclRefExpr *DRE =
5207 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
5208 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
5209 unsigned BuiltinID = FDecl->getBuiltinID();
5210 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
5211 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
5212 "Unexpected nontemporal load/store builtin!");
5213 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
5214 unsigned numArgs = isStore ? 2 : 1;
5215
5216 // Ensure that we have the proper number of arguments.
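// For example (illustrative): __builtin_nontemporal_store(Val, Ptr) takes two
// arguments and __builtin_nontemporal_load(Ptr) takes one; in both cases the
// pointer is the last argument and determines the access type.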
5217 if (checkArgCount(*this, TheCall, numArgs))
5218 return ExprError();
5219
5220 // Inspect the last argument of the nontemporal builtin. This should always
5221 // be a pointer type, from which we imply the type of the memory access.
5222 // Because it is a pointer type, we don't have to worry about any implicit
5223 // casts here.
5224 Expr *PointerArg = TheCall->getArg(numArgs - 1);
5225 ExprResult PointerArgResult =
5226 DefaultFunctionArrayLvalueConversion(PointerArg);
5227
5228 if (PointerArgResult.isInvalid())
5229 return ExprError();
5230 PointerArg = PointerArgResult.get();
5231 TheCall->setArg(numArgs - 1, PointerArg);
5232
5233 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
5234 if (!pointerType) {
5235 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
5236 << PointerArg->getType() << PointerArg->getSourceRange();
5237 return ExprError();
5238 }
5239
5240 QualType ValType = pointerType->getPointeeType();
5241
5242 // Strip any qualifiers off ValType.
5243 ValType = ValType.getUnqualifiedType();
5244 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
5245 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
5246 !ValType->isVectorType()) {
5247 Diag(DRE->getBeginLoc(),
5248 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
5249 << PointerArg->getType() << PointerArg->getSourceRange();
5250 return ExprError();
5251 }
5252
5253 if (!isStore) {
5254 TheCall->setType(ValType);
5255 return TheCallResult;
5256 }
5257
5258 ExprResult ValArg = TheCall->getArg(0);
5259 InitializedEntity Entity = InitializedEntity::InitializeParameter(
5260 Context, ValType, /*consume*/ false);
5261 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
5262 if (ValArg.isInvalid())
5263 return ExprError();
5264
5265 TheCall->setArg(0, ValArg.get());
5266 TheCall->setType(Context.VoidTy);
5267 return TheCallResult;
5268 }
5269
5270 /// CheckObjCString - Checks that the argument to the builtin
5271 /// CFString constructor is correct.
5272 /// Note: It might also make sense to do the UTF-16 conversion here (would
5273 /// simplify the backend).
5274 bool Sema::CheckObjCString(Expr *Arg) {
5275 Arg = Arg->IgnoreParenCasts();
5276 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
5277
5278 if (!Literal || !Literal->isAscii()) {
5279 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
5280 << Arg->getSourceRange();
5281 return true;
5282 }
5283
5284 if (Literal->containsNonAsciiOrNull()) {
5285 StringRef String = Literal->getString();
5286 unsigned NumBytes = String.size();
5287 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
5288 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
5289 llvm::UTF16 *ToPtr = &ToBuf[0];
5290
5291 llvm::ConversionResult Result =
5292 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
5293 ToPtr + NumBytes, llvm::strictConversion);
5294 // Check for conversion failure.
5295 if (Result != llvm::conversionOK)
5296 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
5297 << Arg->getSourceRange();
5298 }
5299 return false;
5300 }
5301
5302 /// CheckOSLogFormatStringArg - Checks that the format string argument to the os_log()
5303 /// and os_trace() functions is correct, and converts it to const char *.
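/// For example (illustrative): the "%d" literal in
/// __builtin_os_log_format(buf, "%d", x) -- or an ObjC @"..." literal in its
/// place -- is accepted here and converted to a 'const char *' argument.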
5304 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5305 Arg = Arg->IgnoreParenCasts(); 5306 auto *Literal = dyn_cast<StringLiteral>(Arg); 5307 if (!Literal) { 5308 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5309 Literal = ObjcLiteral->getString(); 5310 } 5311 } 5312 5313 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5314 return ExprError( 5315 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5316 << Arg->getSourceRange()); 5317 } 5318 5319 ExprResult Result(Literal); 5320 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5321 InitializedEntity Entity = 5322 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5323 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5324 return Result; 5325 } 5326 5327 /// Check that the user is calling the appropriate va_start builtin for the 5328 /// target and calling convention. 5329 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5330 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5331 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5332 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 5333 TT.getArch() == llvm::Triple::aarch64_32); 5334 bool IsWindows = TT.isOSWindows(); 5335 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5336 if (IsX64 || IsAArch64) { 5337 CallingConv CC = CC_C; 5338 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5339 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 5340 if (IsMSVAStart) { 5341 // Don't allow this in System V ABI functions. 5342 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5343 return S.Diag(Fn->getBeginLoc(), 5344 diag::err_ms_va_start_used_in_sysv_function); 5345 } else { 5346 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5347 // On x64 Windows, don't allow this in System V ABI functions. 5348 // (Yes, that means there's no corresponding way to support variadic 5349 // System V ABI functions on Windows.) 5350 if ((IsWindows && CC == CC_X86_64SysV) || 5351 (!IsWindows && CC == CC_Win64)) 5352 return S.Diag(Fn->getBeginLoc(), 5353 diag::err_va_start_used_in_wrong_abi_function) 5354 << !IsWindows; 5355 } 5356 return false; 5357 } 5358 5359 if (IsMSVAStart) 5360 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5361 return false; 5362 } 5363 5364 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5365 ParmVarDecl **LastParam = nullptr) { 5366 // Determine whether the current function, block, or obj-c method is variadic 5367 // and get its parameter list. 5368 bool IsVariadic = false; 5369 ArrayRef<ParmVarDecl *> Params; 5370 DeclContext *Caller = S.CurContext; 5371 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5372 IsVariadic = Block->isVariadic(); 5373 Params = Block->parameters(); 5374 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5375 IsVariadic = FD->isVariadic(); 5376 Params = FD->parameters(); 5377 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5378 IsVariadic = MD->isVariadic(); 5379 // FIXME: This isn't correct for methods (results in bogus warning). 5380 Params = MD->parameters(); 5381 } else if (isa<CapturedDecl>(Caller)) { 5382 // We don't support va_start in a CapturedDecl. 5383 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5384 return true; 5385 } else { 5386 // This must be some other declcontext that parses exprs. 
5387 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5388 return true; 5389 } 5390 5391 if (!IsVariadic) { 5392 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5393 return true; 5394 } 5395 5396 if (LastParam) 5397 *LastParam = Params.empty() ? nullptr : Params.back(); 5398 5399 return false; 5400 } 5401 5402 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5403 /// for validity. Emit an error and return true on failure; return false 5404 /// on success. 5405 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5406 Expr *Fn = TheCall->getCallee(); 5407 5408 if (checkVAStartABI(*this, BuiltinID, Fn)) 5409 return true; 5410 5411 if (TheCall->getNumArgs() > 2) { 5412 Diag(TheCall->getArg(2)->getBeginLoc(), 5413 diag::err_typecheck_call_too_many_args) 5414 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5415 << Fn->getSourceRange() 5416 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5417 (*(TheCall->arg_end() - 1))->getEndLoc()); 5418 return true; 5419 } 5420 5421 if (TheCall->getNumArgs() < 2) { 5422 return Diag(TheCall->getEndLoc(), 5423 diag::err_typecheck_call_too_few_args_at_least) 5424 << 0 /*function call*/ << 2 << TheCall->getNumArgs(); 5425 } 5426 5427 // Type-check the first argument normally. 5428 if (checkBuiltinArgument(*this, TheCall, 0)) 5429 return true; 5430 5431 // Check that the current function is variadic, and get its last parameter. 5432 ParmVarDecl *LastParam; 5433 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5434 return true; 5435 5436 // Verify that the second argument to the builtin is the last argument of the 5437 // current function or method. 5438 bool SecondArgIsLastNamedArgument = false; 5439 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5440 5441 // These are valid if SecondArgIsLastNamedArgument is false after the next 5442 // block. 5443 QualType Type; 5444 SourceLocation ParamLoc; 5445 bool IsCRegister = false; 5446 5447 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5448 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5449 SecondArgIsLastNamedArgument = PV == LastParam; 5450 5451 Type = PV->getType(); 5452 ParamLoc = PV->getLocation(); 5453 IsCRegister = 5454 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5455 } 5456 } 5457 5458 if (!SecondArgIsLastNamedArgument) 5459 Diag(TheCall->getArg(1)->getBeginLoc(), 5460 diag::warn_second_arg_of_va_start_not_last_named_param); 5461 else if (IsCRegister || Type->isReferenceType() || 5462 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5463 // Promotable integers are UB, but enumerations need a bit of 5464 // extra checking to see what their promotable type actually is. 
5465 if (!Type->isPromotableIntegerType()) 5466 return false; 5467 if (!Type->isEnumeralType()) 5468 return true; 5469 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 5470 return !(ED && 5471 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5472 }()) { 5473 unsigned Reason = 0; 5474 if (Type->isReferenceType()) Reason = 1; 5475 else if (IsCRegister) Reason = 2; 5476 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5477 Diag(ParamLoc, diag::note_parameter_type) << Type; 5478 } 5479 5480 TheCall->setType(Context.VoidTy); 5481 return false; 5482 } 5483 5484 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5485 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5486 // const char *named_addr); 5487 5488 Expr *Func = Call->getCallee(); 5489 5490 if (Call->getNumArgs() < 3) 5491 return Diag(Call->getEndLoc(), 5492 diag::err_typecheck_call_too_few_args_at_least) 5493 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5494 5495 // Type-check the first argument normally. 5496 if (checkBuiltinArgument(*this, Call, 0)) 5497 return true; 5498 5499 // Check that the current function is variadic. 5500 if (checkVAStartIsInVariadicFunction(*this, Func)) 5501 return true; 5502 5503 // __va_start on Windows does not validate the parameter qualifiers 5504 5505 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5506 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5507 5508 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5509 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5510 5511 const QualType &ConstCharPtrTy = 5512 Context.getPointerType(Context.CharTy.withConst()); 5513 if (!Arg1Ty->isPointerType() || 5514 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5515 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5516 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5517 << 0 /* qualifier difference */ 5518 << 3 /* parameter mismatch */ 5519 << 2 << Arg1->getType() << ConstCharPtrTy; 5520 5521 const QualType SizeTy = Context.getSizeType(); 5522 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5523 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5524 << Arg2->getType() << SizeTy << 1 /* different class */ 5525 << 0 /* qualifier difference */ 5526 << 3 /* parameter mismatch */ 5527 << 3 << Arg2->getType() << SizeTy; 5528 5529 return false; 5530 } 5531 5532 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5533 /// friends. This is declared to take (...), so we have to check everything. 5534 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 5535 if (TheCall->getNumArgs() < 2) 5536 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5537 << 0 << 2 << TheCall->getNumArgs() /*function call*/; 5538 if (TheCall->getNumArgs() > 2) 5539 return Diag(TheCall->getArg(2)->getBeginLoc(), 5540 diag::err_typecheck_call_too_many_args) 5541 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5542 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5543 (*(TheCall->arg_end() - 1))->getEndLoc()); 5544 5545 ExprResult OrigArg0 = TheCall->getArg(0); 5546 ExprResult OrigArg1 = TheCall->getArg(1); 5547 5548 // Do standard promotions between the two arguments, returning their common 5549 // type. 
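// For example (illustrative): __builtin_isgreater(1.0f, 2.0) promotes both
// operands to double, while __builtin_isgreater(1, 2) yields a common type of
// int, which the check below rejects because it is not a real floating type.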
5550 QualType Res = UsualArithmeticConversions(
5551 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
5552 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5553 return true;
5554
5555 // Make sure any conversions are pushed back into the call; this is
5556 // type safe since unordered compare builtins are declared as "_Bool
5557 // foo(...)".
5558 TheCall->setArg(0, OrigArg0.get());
5559 TheCall->setArg(1, OrigArg1.get());
5560
5561 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5562 return false;
5563
5564 // If the common type isn't a real floating type, then the arguments were
5565 // invalid for this operation.
5566 if (Res.isNull() || !Res->isRealFloatingType())
5567 return Diag(OrigArg0.get()->getBeginLoc(),
5568 diag::err_typecheck_call_invalid_ordered_compare)
5569 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5570 << SourceRange(OrigArg0.get()->getBeginLoc(),
5571 OrigArg1.get()->getEndLoc());
5572
5573 return false;
5574 }
5575
5576 /// SemaBuiltinFPClassification - Handle functions like
5577 /// __builtin_isnan and friends. This is declared to take (...), so we have
5578 /// to check everything. We expect the last argument to be a floating point
5579 /// value.
5580 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
5581 if (TheCall->getNumArgs() < NumArgs)
5582 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
5583 << 0 << NumArgs << TheCall->getNumArgs() /*function call*/;
5584 if (TheCall->getNumArgs() > NumArgs)
5585 return Diag(TheCall->getArg(NumArgs)->getBeginLoc(),
5586 diag::err_typecheck_call_too_many_args)
5587 << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
5588 << SourceRange(TheCall->getArg(NumArgs)->getBeginLoc(),
5589 (*(TheCall->arg_end() - 1))->getEndLoc());
5590
5591 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
5592 // on all preceding parameters just being int. Try all of those.
5593 for (unsigned i = 0; i < NumArgs - 1; ++i) {
5594 Expr *Arg = TheCall->getArg(i);
5595
5596 if (Arg->isTypeDependent())
5597 return false;
5598
5599 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
5600
5601 if (Res.isInvalid())
5602 return true;
5603 TheCall->setArg(i, Res.get());
5604 }
5605
5606 Expr *OrigArg = TheCall->getArg(NumArgs-1);
5607
5608 if (OrigArg->isTypeDependent())
5609 return false;
5610
5611 // Usual Unary Conversions will convert half to float, which we want for
5612 // machines that use fp16 conversion intrinsics. Else, we want to leave the
5613 // type as it is, but do normal L->Rvalue conversions.
5614 if (Context.getTargetInfo().useFP16ConversionIntrinsics())
5615 OrigArg = UsualUnaryConversions(OrigArg).get();
5616 else
5617 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
5618 TheCall->setArg(NumArgs - 1, OrigArg);
5619
5620 // This operation requires a non-_Complex floating-point number.
5621 if (!OrigArg->getType()->isRealFloatingType())
5622 return Diag(OrigArg->getBeginLoc(),
5623 diag::err_typecheck_call_invalid_unary_fp)
5624 << OrigArg->getType() << OrigArg->getSourceRange();
5625
5626 return false;
5627 }
5628
5629 // Customized Sema Checking for VSX builtins that have the following signature:
5630 // vector [...] builtinName(vector [...], vector [...], const int);
5631 // which takes the same type of vectors (any legal vector type) for the first
5632 // two arguments and a compile-time constant for the third argument.
5633 // Example builtins are : 5634 // vector double vec_xxpermdi(vector double, vector double, int); 5635 // vector short vec_xxsldwi(vector short, vector short, int); 5636 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5637 unsigned ExpectedNumArgs = 3; 5638 if (TheCall->getNumArgs() < ExpectedNumArgs) 5639 return Diag(TheCall->getEndLoc(), 5640 diag::err_typecheck_call_too_few_args_at_least) 5641 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5642 << TheCall->getSourceRange(); 5643 5644 if (TheCall->getNumArgs() > ExpectedNumArgs) 5645 return Diag(TheCall->getEndLoc(), 5646 diag::err_typecheck_call_too_many_args_at_most) 5647 << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs() 5648 << TheCall->getSourceRange(); 5649 5650 // Check the third argument is a compile time constant 5651 llvm::APSInt Value; 5652 if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context)) 5653 return Diag(TheCall->getBeginLoc(), 5654 diag::err_vsx_builtin_nonconstant_argument) 5655 << 3 /* argument index */ << TheCall->getDirectCallee() 5656 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5657 TheCall->getArg(2)->getEndLoc()); 5658 5659 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5660 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5661 5662 // Check the type of argument 1 and argument 2 are vectors. 5663 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5664 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5665 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5666 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5667 << TheCall->getDirectCallee() 5668 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5669 TheCall->getArg(1)->getEndLoc()); 5670 } 5671 5672 // Check the first two arguments are the same type. 5673 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5674 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5675 << TheCall->getDirectCallee() 5676 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5677 TheCall->getArg(1)->getEndLoc()); 5678 } 5679 5680 // When default clang type checking is turned off and the customized type 5681 // checking is used, the returning type of the function must be explicitly 5682 // set. Otherwise it is _Bool by default. 5683 TheCall->setType(Arg1Ty); 5684 5685 return false; 5686 } 5687 5688 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5689 // This is declared to take (...), so we have to check everything. 
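// For example (illustrative), for 4-element integer vectors a, b and an
// integer mask vector m of the same length:
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)  // binary form, constant indices
//   __builtin_shufflevector(a, m)              // unary form, vector mask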
5690 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5691 if (TheCall->getNumArgs() < 2) 5692 return ExprError(Diag(TheCall->getEndLoc(), 5693 diag::err_typecheck_call_too_few_args_at_least) 5694 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5695 << TheCall->getSourceRange()); 5696 5697 // Determine which of the following types of shufflevector we're checking: 5698 // 1) unary, vector mask: (lhs, mask) 5699 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5700 QualType resType = TheCall->getArg(0)->getType(); 5701 unsigned numElements = 0; 5702 5703 if (!TheCall->getArg(0)->isTypeDependent() && 5704 !TheCall->getArg(1)->isTypeDependent()) { 5705 QualType LHSType = TheCall->getArg(0)->getType(); 5706 QualType RHSType = TheCall->getArg(1)->getType(); 5707 5708 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5709 return ExprError( 5710 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5711 << TheCall->getDirectCallee() 5712 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5713 TheCall->getArg(1)->getEndLoc())); 5714 5715 numElements = LHSType->castAs<VectorType>()->getNumElements(); 5716 unsigned numResElements = TheCall->getNumArgs() - 2; 5717 5718 // Check to see if we have a call with 2 vector arguments, the unary shuffle 5719 // with mask. If so, verify that RHS is an integer vector type with the 5720 // same number of elts as lhs. 5721 if (TheCall->getNumArgs() == 2) { 5722 if (!RHSType->hasIntegerRepresentation() || 5723 RHSType->castAs<VectorType>()->getNumElements() != numElements) 5724 return ExprError(Diag(TheCall->getBeginLoc(), 5725 diag::err_vec_builtin_incompatible_vector) 5726 << TheCall->getDirectCallee() 5727 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 5728 TheCall->getArg(1)->getEndLoc())); 5729 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 5730 return ExprError(Diag(TheCall->getBeginLoc(), 5731 diag::err_vec_builtin_incompatible_vector) 5732 << TheCall->getDirectCallee() 5733 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5734 TheCall->getArg(1)->getEndLoc())); 5735 } else if (numElements != numResElements) { 5736 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 5737 resType = Context.getVectorType(eltType, numResElements, 5738 VectorType::GenericVector); 5739 } 5740 } 5741 5742 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 5743 if (TheCall->getArg(i)->isTypeDependent() || 5744 TheCall->getArg(i)->isValueDependent()) 5745 continue; 5746 5747 llvm::APSInt Result(32); 5748 if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) 5749 return ExprError(Diag(TheCall->getBeginLoc(), 5750 diag::err_shufflevector_nonconstant_argument) 5751 << TheCall->getArg(i)->getSourceRange()); 5752 5753 // Allow -1 which will be translated to undef in the IR. 
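// For example (illustrative): __builtin_shufflevector(a, b, 0, -1, 2, -1)
// leaves the second and fourth result elements unspecified (undef).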
5754 if (Result.isSigned() && Result.isAllOnesValue()) 5755 continue; 5756 5757 if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) 5758 return ExprError(Diag(TheCall->getBeginLoc(), 5759 diag::err_shufflevector_argument_too_large) 5760 << TheCall->getArg(i)->getSourceRange()); 5761 } 5762 5763 SmallVector<Expr*, 32> exprs; 5764 5765 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 5766 exprs.push_back(TheCall->getArg(i)); 5767 TheCall->setArg(i, nullptr); 5768 } 5769 5770 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 5771 TheCall->getCallee()->getBeginLoc(), 5772 TheCall->getRParenLoc()); 5773 } 5774 5775 /// SemaConvertVectorExpr - Handle __builtin_convertvector 5776 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 5777 SourceLocation BuiltinLoc, 5778 SourceLocation RParenLoc) { 5779 ExprValueKind VK = VK_RValue; 5780 ExprObjectKind OK = OK_Ordinary; 5781 QualType DstTy = TInfo->getType(); 5782 QualType SrcTy = E->getType(); 5783 5784 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 5785 return ExprError(Diag(BuiltinLoc, 5786 diag::err_convertvector_non_vector) 5787 << E->getSourceRange()); 5788 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 5789 return ExprError(Diag(BuiltinLoc, 5790 diag::err_convertvector_non_vector_type)); 5791 5792 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 5793 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 5794 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 5795 if (SrcElts != DstElts) 5796 return ExprError(Diag(BuiltinLoc, 5797 diag::err_convertvector_incompatible_vector) 5798 << E->getSourceRange()); 5799 } 5800 5801 return new (Context) 5802 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 5803 } 5804 5805 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 5806 // This is declared to take (const void*, ...) and can take two 5807 // optional constant int args. 5808 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 5809 unsigned NumArgs = TheCall->getNumArgs(); 5810 5811 if (NumArgs > 3) 5812 return Diag(TheCall->getEndLoc(), 5813 diag::err_typecheck_call_too_many_args_at_most) 5814 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5815 5816 // Argument 0 is checked for us and the remaining arguments must be 5817 // constant integers. 5818 for (unsigned i = 1; i != NumArgs; ++i) 5819 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 5820 return true; 5821 5822 return false; 5823 } 5824 5825 /// SemaBuiltinAssume - Handle __assume (MS Extension). 5826 // __assume does not evaluate its arguments, and should warn if its argument 5827 // has side effects. 5828 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 5829 Expr *Arg = TheCall->getArg(0); 5830 if (Arg->isInstantiationDependent()) return false; 5831 5832 if (Arg->HasSideEffects(Context)) 5833 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 5834 << Arg->getSourceRange() 5835 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 5836 5837 return false; 5838 } 5839 5840 /// Handle __builtin_alloca_with_align. This is declared 5841 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 5842 /// than 8. 5843 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 5844 // The alignment must be a constant integer. 5845 Expr *Arg = TheCall->getArg(1); 5846 5847 // We can't check the value of a dependent argument. 
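// For example (illustrative): __builtin_alloca_with_align(Size, 64) requests
// 8-byte alignment; the checks below require the alignment operand to be a
// constant power of two, at least the char width (it is expressed in bits),
// and no larger than INT32_MAX.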
5848 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5849 if (const auto *UE = 5850 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 5851 if (UE->getKind() == UETT_AlignOf || 5852 UE->getKind() == UETT_PreferredAlignOf) 5853 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 5854 << Arg->getSourceRange(); 5855 5856 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 5857 5858 if (!Result.isPowerOf2()) 5859 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5860 << Arg->getSourceRange(); 5861 5862 if (Result < Context.getCharWidth()) 5863 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 5864 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 5865 5866 if (Result > std::numeric_limits<int32_t>::max()) 5867 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 5868 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 5869 } 5870 5871 return false; 5872 } 5873 5874 /// Handle __builtin_assume_aligned. This is declared 5875 /// as (const void*, size_t, ...) and can take one optional constant int arg. 5876 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 5877 unsigned NumArgs = TheCall->getNumArgs(); 5878 5879 if (NumArgs > 3) 5880 return Diag(TheCall->getEndLoc(), 5881 diag::err_typecheck_call_too_many_args_at_most) 5882 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 5883 5884 // The alignment must be a constant integer. 5885 Expr *Arg = TheCall->getArg(1); 5886 5887 // We can't check the value of a dependent argument. 5888 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 5889 llvm::APSInt Result; 5890 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 5891 return true; 5892 5893 if (!Result.isPowerOf2()) 5894 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 5895 << Arg->getSourceRange(); 5896 5897 if (Result > Sema::MaximumAlignment) 5898 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 5899 << Arg->getSourceRange() << Sema::MaximumAlignment; 5900 } 5901 5902 if (NumArgs > 2) { 5903 ExprResult Arg(TheCall->getArg(2)); 5904 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5905 Context.getSizeType(), false); 5906 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5907 if (Arg.isInvalid()) return true; 5908 TheCall->setArg(2, Arg.get()); 5909 } 5910 5911 return false; 5912 } 5913 5914 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 5915 unsigned BuiltinID = 5916 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 5917 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 5918 5919 unsigned NumArgs = TheCall->getNumArgs(); 5920 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 5921 if (NumArgs < NumRequiredArgs) { 5922 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 5923 << 0 /* function call */ << NumRequiredArgs << NumArgs 5924 << TheCall->getSourceRange(); 5925 } 5926 if (NumArgs >= NumRequiredArgs + 0x100) { 5927 return Diag(TheCall->getEndLoc(), 5928 diag::err_typecheck_call_too_many_args_at_most) 5929 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 5930 << TheCall->getSourceRange(); 5931 } 5932 unsigned i = 0; 5933 5934 // For formatting call, check buffer arg. 
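// For example (illustrative): __builtin_os_log_format(buf, "%d", x) carries
// the buffer as its first argument, whereas
// __builtin_os_log_format_buffer_size("%d", x) has no buffer argument, which
// is why the size-only call skips this step.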
5935 if (!IsSizeCall) { 5936 ExprResult Arg(TheCall->getArg(i)); 5937 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5938 Context, Context.VoidPtrTy, false); 5939 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5940 if (Arg.isInvalid()) 5941 return true; 5942 TheCall->setArg(i, Arg.get()); 5943 i++; 5944 } 5945 5946 // Check string literal arg. 5947 unsigned FormatIdx = i; 5948 { 5949 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 5950 if (Arg.isInvalid()) 5951 return true; 5952 TheCall->setArg(i, Arg.get()); 5953 i++; 5954 } 5955 5956 // Make sure variadic args are scalar. 5957 unsigned FirstDataArg = i; 5958 while (i < NumArgs) { 5959 ExprResult Arg = DefaultVariadicArgumentPromotion( 5960 TheCall->getArg(i), VariadicFunction, nullptr); 5961 if (Arg.isInvalid()) 5962 return true; 5963 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 5964 if (ArgSize.getQuantity() >= 0x100) { 5965 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 5966 << i << (int)ArgSize.getQuantity() << 0xff 5967 << TheCall->getSourceRange(); 5968 } 5969 TheCall->setArg(i, Arg.get()); 5970 i++; 5971 } 5972 5973 // Check formatting specifiers. NOTE: We're only doing this for the non-size 5974 // call to avoid duplicate diagnostics. 5975 if (!IsSizeCall) { 5976 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 5977 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 5978 bool Success = CheckFormatArguments( 5979 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 5980 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 5981 CheckedVarArgs); 5982 if (!Success) 5983 return true; 5984 } 5985 5986 if (IsSizeCall) { 5987 TheCall->setType(Context.getSizeType()); 5988 } else { 5989 TheCall->setType(Context.VoidPtrTy); 5990 } 5991 return false; 5992 } 5993 5994 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 5995 /// TheCall is a constant expression. 5996 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 5997 llvm::APSInt &Result) { 5998 Expr *Arg = TheCall->getArg(ArgNum); 5999 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6000 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6001 6002 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6003 6004 if (!Arg->isIntegerConstantExpr(Result, Context)) 6005 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6006 << FDecl->getDeclName() << Arg->getSourceRange(); 6007 6008 return false; 6009 } 6010 6011 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6012 /// TheCall is a constant expression in the range [Low, High]. 6013 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6014 int Low, int High, bool RangeIsError) { 6015 if (isConstantEvaluated()) 6016 return false; 6017 llvm::APSInt Result; 6018 6019 // We can't check the value of a dependent argument. 6020 Expr *Arg = TheCall->getArg(ArgNum); 6021 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6022 return false; 6023 6024 // Check constant-ness first. 
6025   if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6026     return true;
6027 
6028   if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
6029     if (RangeIsError)
6030       return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
6031              << Result.toString(10) << Low << High << Arg->getSourceRange();
6032     else
6033       // Defer the warning until we know if the code will be emitted so that
6034       // dead code can ignore this.
6035       DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6036                           PDiag(diag::warn_argument_invalid_range)
6037                               << Result.toString(10) << Low << High
6038                               << Arg->getSourceRange());
6039   }
6040 
6041   return false;
6042 }
6043 
6044 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6045 /// TheCall is a constant expression that is a multiple of Num.
6046 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6047                                           unsigned Num) {
6048   llvm::APSInt Result;
6049 
6050   // We can't check the value of a dependent argument.
6051   Expr *Arg = TheCall->getArg(ArgNum);
6052   if (Arg->isTypeDependent() || Arg->isValueDependent())
6053     return false;
6054 
6055   // Check constant-ness first.
6056   if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6057     return true;
6058 
6059   if (Result.getSExtValue() % Num != 0)
6060     return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6061            << Num << Arg->getSourceRange();
6062 
6063   return false;
6064 }
6065 
6066 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
6067 /// constant expression representing a power of 2.
6068 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
6069   llvm::APSInt Result;
6070 
6071   // We can't check the value of a dependent argument.
6072   Expr *Arg = TheCall->getArg(ArgNum);
6073   if (Arg->isTypeDependent() || Arg->isValueDependent())
6074     return false;
6075 
6076   // Check constant-ness first.
6077   if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6078     return true;
6079 
6080   // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
6081   // and only if x is a power of 2.
6082   if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
6083     return false;
6084 
6085   return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
6086          << Arg->getSourceRange();
6087 }
6088 
6089 static bool IsShiftedByte(llvm::APSInt Value) {
6090   if (Value.isNegative())
6091     return false;
6092 
6093   // Check if it's a shifted byte, by shifting it down
6094   while (true) {
6095     // If the value fits in the bottom byte, the check passes.
6096     if (Value < 0x100)
6097       return true;
6098 
6099     // Otherwise, if the value has _any_ bits in the bottom byte, the check
6100     // fails.
6101     if ((Value & 0xFF) != 0)
6102       return false;
6103 
6104     // If the bottom 8 bits are all 0, but something above that is nonzero,
6105     // then shifting the value right by 8 bits won't affect whether it's a
6106     // shifted byte or not. So do that, and go round again.
6107     Value >>= 8;
6108   }
6109 }
6110 
6111 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
6112 /// a constant expression representing an arbitrary byte value shifted left by
6113 /// a multiple of 8 bits.
6114 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
6115                                              unsigned ArgBits) {
6116   llvm::APSInt Result;
6117 
6118   // We can't check the value of a dependent argument.
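  // (As an illustration of the property checked below: after truncation,
  //  values such as 0x42, 0x4200 or 0x420000 qualify as shifted bytes, while
  //  0x0101 does not.)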
6119 Expr *Arg = TheCall->getArg(ArgNum); 6120 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6121 return false; 6122 6123 // Check constant-ness first. 6124 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6125 return true; 6126 6127 // Truncate to the given size. 6128 Result = Result.getLoBits(ArgBits); 6129 Result.setIsUnsigned(true); 6130 6131 if (IsShiftedByte(Result)) 6132 return false; 6133 6134 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 6135 << Arg->getSourceRange(); 6136 } 6137 6138 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 6139 /// TheCall is a constant expression representing either a shifted byte value, 6140 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 6141 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 6142 /// Arm MVE intrinsics. 6143 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 6144 int ArgNum, 6145 unsigned ArgBits) { 6146 llvm::APSInt Result; 6147 6148 // We can't check the value of a dependent argument. 6149 Expr *Arg = TheCall->getArg(ArgNum); 6150 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6151 return false; 6152 6153 // Check constant-ness first. 6154 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6155 return true; 6156 6157 // Truncate to the given size. 6158 Result = Result.getLoBits(ArgBits); 6159 Result.setIsUnsigned(true); 6160 6161 // Check to see if it's in either of the required forms. 6162 if (IsShiftedByte(Result) || 6163 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 6164 return false; 6165 6166 return Diag(TheCall->getBeginLoc(), 6167 diag::err_argument_not_shifted_byte_or_xxff) 6168 << Arg->getSourceRange(); 6169 } 6170 6171 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6172 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6173 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6174 if (checkArgCount(*this, TheCall, 2)) 6175 return true; 6176 Expr *Arg0 = TheCall->getArg(0); 6177 Expr *Arg1 = TheCall->getArg(1); 6178 6179 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6180 if (FirstArg.isInvalid()) 6181 return true; 6182 QualType FirstArgType = FirstArg.get()->getType(); 6183 if (!FirstArgType->isAnyPointerType()) 6184 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6185 << "first" << FirstArgType << Arg0->getSourceRange(); 6186 TheCall->setArg(0, FirstArg.get()); 6187 6188 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6189 if (SecArg.isInvalid()) 6190 return true; 6191 QualType SecArgType = SecArg.get()->getType(); 6192 if (!SecArgType->isIntegerType()) 6193 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6194 << "second" << SecArgType << Arg1->getSourceRange(); 6195 6196 // Derive the return type from the pointer argument. 
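    // (For example, given `int *p`, __builtin_arm_irg(p, mask) is given the
    //  type `int *` here; an illustrative sketch, not a codegen statement.)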
6197     TheCall->setType(FirstArgType);
6198     return false;
6199   }
6200 
6201   if (BuiltinID == AArch64::BI__builtin_arm_addg) {
6202     if (checkArgCount(*this, TheCall, 2))
6203       return true;
6204 
6205     Expr *Arg0 = TheCall->getArg(0);
6206     ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6207     if (FirstArg.isInvalid())
6208       return true;
6209     QualType FirstArgType = FirstArg.get()->getType();
6210     if (!FirstArgType->isAnyPointerType())
6211       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6212              << "first" << FirstArgType << Arg0->getSourceRange();
6213     TheCall->setArg(0, FirstArg.get());
6214 
6215     // Derive the return type from the pointer argument.
6216     TheCall->setType(FirstArgType);
6217 
6218     // Second arg must be a constant in range [0,15]
6219     return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
6220   }
6221 
6222   if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
6223     if (checkArgCount(*this, TheCall, 2))
6224       return true;
6225     Expr *Arg0 = TheCall->getArg(0);
6226     Expr *Arg1 = TheCall->getArg(1);
6227 
6228     ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6229     if (FirstArg.isInvalid())
6230       return true;
6231     QualType FirstArgType = FirstArg.get()->getType();
6232     if (!FirstArgType->isAnyPointerType())
6233       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6234              << "first" << FirstArgType << Arg0->getSourceRange();
6235 
6236     QualType SecArgType = Arg1->getType();
6237     if (!SecArgType->isIntegerType())
6238       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6239              << "second" << SecArgType << Arg1->getSourceRange();
6240     TheCall->setType(Context.IntTy);
6241     return false;
6242   }
6243 
6244   if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
6245       BuiltinID == AArch64::BI__builtin_arm_stg) {
6246     if (checkArgCount(*this, TheCall, 1))
6247       return true;
6248     Expr *Arg0 = TheCall->getArg(0);
6249     ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6250     if (FirstArg.isInvalid())
6251       return true;
6252 
6253     QualType FirstArgType = FirstArg.get()->getType();
6254     if (!FirstArgType->isAnyPointerType())
6255       return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6256              << "first" << FirstArgType << Arg0->getSourceRange();
6257     TheCall->setArg(0, FirstArg.get());
6258 
6259     // Derive the return type from the pointer argument.
6260 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6261 TheCall->setType(FirstArgType); 6262 return false; 6263 } 6264 6265 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6266 Expr *ArgA = TheCall->getArg(0); 6267 Expr *ArgB = TheCall->getArg(1); 6268 6269 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6270 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6271 6272 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6273 return true; 6274 6275 QualType ArgTypeA = ArgExprA.get()->getType(); 6276 QualType ArgTypeB = ArgExprB.get()->getType(); 6277 6278 auto isNull = [&] (Expr *E) -> bool { 6279 return E->isNullPointerConstant( 6280 Context, Expr::NPC_ValueDependentIsNotNull); }; 6281 6282 // argument should be either a pointer or null 6283 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6284 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6285 << "first" << ArgTypeA << ArgA->getSourceRange(); 6286 6287 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6288 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6289 << "second" << ArgTypeB << ArgB->getSourceRange(); 6290 6291 // Ensure Pointee types are compatible 6292 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6293 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6294 QualType pointeeA = ArgTypeA->getPointeeType(); 6295 QualType pointeeB = ArgTypeB->getPointeeType(); 6296 if (!Context.typesAreCompatible( 6297 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6298 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6299 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6300 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6301 << ArgB->getSourceRange(); 6302 } 6303 } 6304 6305 // at least one argument should be pointer type 6306 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6307 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6308 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6309 6310 if (isNull(ArgA)) // adopt type of the other pointer 6311 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6312 6313 if (isNull(ArgB)) 6314 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6315 6316 TheCall->setArg(0, ArgExprA.get()); 6317 TheCall->setArg(1, ArgExprB.get()); 6318 TheCall->setType(Context.LongLongTy); 6319 return false; 6320 } 6321 assert(false && "Unhandled ARM MTE intrinsic"); 6322 return true; 6323 } 6324 6325 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6326 /// TheCall is an ARM/AArch64 special register string literal. 
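/// For example (illustrative), an ARM coprocessor access string such as
/// "cp15:0:c13:c0:3" is validated field by field below, while a bare register
/// name is only accepted when AllowName is true.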
6327 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
6328                                     int ArgNum, unsigned ExpectedFieldNum,
6329                                     bool AllowName) {
6330   bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6331                       BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6332                       BuiltinID == ARM::BI__builtin_arm_rsr ||
6333                       BuiltinID == ARM::BI__builtin_arm_rsrp ||
6334                       BuiltinID == ARM::BI__builtin_arm_wsr ||
6335                       BuiltinID == ARM::BI__builtin_arm_wsrp;
6336   bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
6337                           BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
6338                           BuiltinID == AArch64::BI__builtin_arm_rsr ||
6339                           BuiltinID == AArch64::BI__builtin_arm_rsrp ||
6340                           BuiltinID == AArch64::BI__builtin_arm_wsr ||
6341                           BuiltinID == AArch64::BI__builtin_arm_wsrp;
6342   assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
6343 
6344   // We can't check the value of a dependent argument.
6345   Expr *Arg = TheCall->getArg(ArgNum);
6346   if (Arg->isTypeDependent() || Arg->isValueDependent())
6347     return false;
6348 
6349   // Check if the argument is a string literal.
6350   if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
6351     return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
6352            << Arg->getSourceRange();
6353 
6354   // Check the type of special register given.
6355   StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
6356   SmallVector<StringRef, 6> Fields;
6357   Reg.split(Fields, ":");
6358 
6359   if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
6360     return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
6361            << Arg->getSourceRange();
6362 
6363   // If the string is the name of a register then we cannot check that it is
6364   // valid here, but if the string is of one of the forms described in ACLE
6365   // then we can check that the supplied fields are integers and within the
6366   // valid ranges.
6367   if (Fields.size() > 1) {
6368     bool FiveFields = Fields.size() == 5;
6369 
6370     bool ValidString = true;
6371     if (IsARMBuiltin) {
6372       ValidString &= Fields[0].startswith_lower("cp") ||
6373                      Fields[0].startswith_lower("p");
6374       if (ValidString)
6375         Fields[0] =
6376             Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
6377 
6378       ValidString &= Fields[2].startswith_lower("c");
6379       if (ValidString)
6380         Fields[2] = Fields[2].drop_front(1);
6381 
6382       if (FiveFields) {
6383         ValidString &= Fields[3].startswith_lower("c");
6384         if (ValidString)
6385           Fields[3] = Fields[3].drop_front(1);
6386       }
6387     }
6388 
6389     SmallVector<int, 5> Ranges;
6390     if (FiveFields)
6391       Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
6392     else
6393       Ranges.append({15, 7, 15});
6394 
6395     for (unsigned i=0; i<Fields.size(); ++i) {
6396       int IntField;
6397       ValidString &= !Fields[i].getAsInteger(10, IntField);
6398       ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
6399     }
6400 
6401     if (!ValidString)
6402       return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
6403              << Arg->getSourceRange();
6404   } else if (IsAArch64Builtin && Fields.size() == 1) {
6405     // If the register name is one of those that appear in the condition below
6406     // and the special register builtin being used is one of the write builtins,
6407     // then we require that the argument provided for writing to the register
6408     // is an integer constant expression. This is because it will be lowered to
6409     // an MSR (immediate) instruction, so we need to know the immediate at
6410     // compile time.
6411 if (TheCall->getNumArgs() != 2) 6412 return false; 6413 6414 std::string RegLower = Reg.lower(); 6415 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6416 RegLower != "pan" && RegLower != "uao") 6417 return false; 6418 6419 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6420 } 6421 6422 return false; 6423 } 6424 6425 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6426 /// This checks that the target supports __builtin_longjmp and 6427 /// that val is a constant 1. 6428 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6429 if (!Context.getTargetInfo().hasSjLjLowering()) 6430 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6431 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6432 6433 Expr *Arg = TheCall->getArg(1); 6434 llvm::APSInt Result; 6435 6436 // TODO: This is less than ideal. Overload this to take a value. 6437 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6438 return true; 6439 6440 if (Result != 1) 6441 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6442 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6443 6444 return false; 6445 } 6446 6447 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6448 /// This checks that the target supports __builtin_setjmp. 6449 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6450 if (!Context.getTargetInfo().hasSjLjLowering()) 6451 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6452 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6453 return false; 6454 } 6455 6456 namespace { 6457 6458 class UncoveredArgHandler { 6459 enum { Unknown = -1, AllCovered = -2 }; 6460 6461 signed FirstUncoveredArg = Unknown; 6462 SmallVector<const Expr *, 4> DiagnosticExprs; 6463 6464 public: 6465 UncoveredArgHandler() = default; 6466 6467 bool hasUncoveredArg() const { 6468 return (FirstUncoveredArg >= 0); 6469 } 6470 6471 unsigned getUncoveredArg() const { 6472 assert(hasUncoveredArg() && "no uncovered argument"); 6473 return FirstUncoveredArg; 6474 } 6475 6476 void setAllCovered() { 6477 // A string has been found with all arguments covered, so clear out 6478 // the diagnostics. 6479 DiagnosticExprs.clear(); 6480 FirstUncoveredArg = AllCovered; 6481 } 6482 6483 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6484 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6485 6486 // Don't update if a previous string covers all arguments. 6487 if (FirstUncoveredArg == AllCovered) 6488 return; 6489 6490 // UncoveredArgHandler tracks the highest uncovered argument index 6491 // and with it all the strings that match this index. 6492 if (NewFirstUncoveredArg == FirstUncoveredArg) 6493 DiagnosticExprs.push_back(StrExpr); 6494 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6495 DiagnosticExprs.clear(); 6496 DiagnosticExprs.push_back(StrExpr); 6497 FirstUncoveredArg = NewFirstUncoveredArg; 6498 } 6499 } 6500 6501 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6502 }; 6503 6504 enum StringLiteralCheckType { 6505 SLCT_NotALiteral, 6506 SLCT_UncheckedLiteral, 6507 SLCT_CheckedLiteral 6508 }; 6509 6510 } // namespace 6511 6512 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6513 BinaryOperatorKind BinOpKind, 6514 bool AddendIsRight) { 6515 unsigned BitWidth = Offset.getBitWidth(); 6516 unsigned AddendBitWidth = Addend.getBitWidth(); 6517 // There might be negative interim results. 
6518 if (Addend.isUnsigned()) { 6519 Addend = Addend.zext(++AddendBitWidth); 6520 Addend.setIsSigned(true); 6521 } 6522 // Adjust the bit width of the APSInts. 6523 if (AddendBitWidth > BitWidth) { 6524 Offset = Offset.sext(AddendBitWidth); 6525 BitWidth = AddendBitWidth; 6526 } else if (BitWidth > AddendBitWidth) { 6527 Addend = Addend.sext(BitWidth); 6528 } 6529 6530 bool Ov = false; 6531 llvm::APSInt ResOffset = Offset; 6532 if (BinOpKind == BO_Add) 6533 ResOffset = Offset.sadd_ov(Addend, Ov); 6534 else { 6535 assert(AddendIsRight && BinOpKind == BO_Sub && 6536 "operator must be add or sub with addend on the right"); 6537 ResOffset = Offset.ssub_ov(Addend, Ov); 6538 } 6539 6540 // We add an offset to a pointer here so we should support an offset as big as 6541 // possible. 6542 if (Ov) { 6543 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6544 "index (intermediate) result too big"); 6545 Offset = Offset.sext(2 * BitWidth); 6546 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6547 return; 6548 } 6549 6550 Offset = ResOffset; 6551 } 6552 6553 namespace { 6554 6555 // This is a wrapper class around StringLiteral to support offsetted string 6556 // literals as format strings. It takes the offset into account when returning 6557 // the string and its length or the source locations to display notes correctly. 6558 class FormatStringLiteral { 6559 const StringLiteral *FExpr; 6560 int64_t Offset; 6561 6562 public: 6563 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6564 : FExpr(fexpr), Offset(Offset) {} 6565 6566 StringRef getString() const { 6567 return FExpr->getString().drop_front(Offset); 6568 } 6569 6570 unsigned getByteLength() const { 6571 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6572 } 6573 6574 unsigned getLength() const { return FExpr->getLength() - Offset; } 6575 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6576 6577 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6578 6579 QualType getType() const { return FExpr->getType(); } 6580 6581 bool isAscii() const { return FExpr->isAscii(); } 6582 bool isWide() const { return FExpr->isWide(); } 6583 bool isUTF8() const { return FExpr->isUTF8(); } 6584 bool isUTF16() const { return FExpr->isUTF16(); } 6585 bool isUTF32() const { return FExpr->isUTF32(); } 6586 bool isPascal() const { return FExpr->isPascal(); } 6587 6588 SourceLocation getLocationOfByte( 6589 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6590 const TargetInfo &Target, unsigned *StartToken = nullptr, 6591 unsigned *StartTokenByteOffset = nullptr) const { 6592 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6593 StartToken, StartTokenByteOffset); 6594 } 6595 6596 SourceLocation getBeginLoc() const LLVM_READONLY { 6597 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6598 } 6599 6600 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6601 }; 6602 6603 } // namespace 6604 6605 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6606 const Expr *OrigFormatExpr, 6607 ArrayRef<const Expr *> Args, 6608 bool HasVAListArg, unsigned format_idx, 6609 unsigned firstDataArg, 6610 Sema::FormatStringType Type, 6611 bool inFunctionCall, 6612 Sema::VariadicCallType CallType, 6613 llvm::SmallBitVector &CheckedVarArgs, 6614 UncoveredArgHandler &UncoveredArg, 6615 bool IgnoreStringsWithoutSpecifiers); 6616 6617 // Determine if an expression is a string literal or constant string. 
6618 // If this function returns false on the arguments to a function expecting a 6619 // format string, we will usually need to emit a warning. 6620 // True string literals are then checked by CheckFormatString. 6621 static StringLiteralCheckType 6622 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6623 bool HasVAListArg, unsigned format_idx, 6624 unsigned firstDataArg, Sema::FormatStringType Type, 6625 Sema::VariadicCallType CallType, bool InFunctionCall, 6626 llvm::SmallBitVector &CheckedVarArgs, 6627 UncoveredArgHandler &UncoveredArg, 6628 llvm::APSInt Offset, 6629 bool IgnoreStringsWithoutSpecifiers = false) { 6630 if (S.isConstantEvaluated()) 6631 return SLCT_NotALiteral; 6632 tryAgain: 6633 assert(Offset.isSigned() && "invalid offset"); 6634 6635 if (E->isTypeDependent() || E->isValueDependent()) 6636 return SLCT_NotALiteral; 6637 6638 E = E->IgnoreParenCasts(); 6639 6640 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6641 // Technically -Wformat-nonliteral does not warn about this case. 6642 // The behavior of printf and friends in this case is implementation 6643 // dependent. Ideally if the format string cannot be null then 6644 // it should have a 'nonnull' attribute in the function prototype. 6645 return SLCT_UncheckedLiteral; 6646 6647 switch (E->getStmtClass()) { 6648 case Stmt::BinaryConditionalOperatorClass: 6649 case Stmt::ConditionalOperatorClass: { 6650 // The expression is a literal if both sub-expressions were, and it was 6651 // completely checked only if both sub-expressions were checked. 6652 const AbstractConditionalOperator *C = 6653 cast<AbstractConditionalOperator>(E); 6654 6655 // Determine whether it is necessary to check both sub-expressions, for 6656 // example, because the condition expression is a constant that can be 6657 // evaluated at compile time. 6658 bool CheckLeft = true, CheckRight = true; 6659 6660 bool Cond; 6661 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6662 S.isConstantEvaluated())) { 6663 if (Cond) 6664 CheckRight = false; 6665 else 6666 CheckLeft = false; 6667 } 6668 6669 // We need to maintain the offsets for the right and the left hand side 6670 // separately to check if every possible indexed expression is a valid 6671 // string literal. They might have different offsets for different string 6672 // literals in the end. 6673 StringLiteralCheckType Left; 6674 if (!CheckLeft) 6675 Left = SLCT_UncheckedLiteral; 6676 else { 6677 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6678 HasVAListArg, format_idx, firstDataArg, 6679 Type, CallType, InFunctionCall, 6680 CheckedVarArgs, UncoveredArg, Offset, 6681 IgnoreStringsWithoutSpecifiers); 6682 if (Left == SLCT_NotALiteral || !CheckRight) { 6683 return Left; 6684 } 6685 } 6686 6687 StringLiteralCheckType Right = checkFormatStringExpr( 6688 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 6689 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6690 IgnoreStringsWithoutSpecifiers); 6691 6692 return (CheckLeft && Left < Right) ? 
Left : Right; 6693 } 6694 6695 case Stmt::ImplicitCastExprClass: 6696 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6697 goto tryAgain; 6698 6699 case Stmt::OpaqueValueExprClass: 6700 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6701 E = src; 6702 goto tryAgain; 6703 } 6704 return SLCT_NotALiteral; 6705 6706 case Stmt::PredefinedExprClass: 6707 // While __func__, etc., are technically not string literals, they 6708 // cannot contain format specifiers and thus are not a security 6709 // liability. 6710 return SLCT_UncheckedLiteral; 6711 6712 case Stmt::DeclRefExprClass: { 6713 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 6714 6715 // As an exception, do not flag errors for variables binding to 6716 // const string literals. 6717 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 6718 bool isConstant = false; 6719 QualType T = DR->getType(); 6720 6721 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 6722 isConstant = AT->getElementType().isConstant(S.Context); 6723 } else if (const PointerType *PT = T->getAs<PointerType>()) { 6724 isConstant = T.isConstant(S.Context) && 6725 PT->getPointeeType().isConstant(S.Context); 6726 } else if (T->isObjCObjectPointerType()) { 6727 // In ObjC, there is usually no "const ObjectPointer" type, 6728 // so don't check if the pointee type is constant. 6729 isConstant = T.isConstant(S.Context); 6730 } 6731 6732 if (isConstant) { 6733 if (const Expr *Init = VD->getAnyInitializer()) { 6734 // Look through initializers like const char c[] = { "foo" } 6735 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 6736 if (InitList->isStringLiteralInit()) 6737 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 6738 } 6739 return checkFormatStringExpr(S, Init, Args, 6740 HasVAListArg, format_idx, 6741 firstDataArg, Type, CallType, 6742 /*InFunctionCall*/ false, CheckedVarArgs, 6743 UncoveredArg, Offset); 6744 } 6745 } 6746 6747 // For vprintf* functions (i.e., HasVAListArg==true), we add a 6748 // special check to see if the format string is a function parameter 6749 // of the function calling the printf function. If the function 6750 // has an attribute indicating it is a printf-like function, then we 6751 // should suppress warnings concerning non-literals being used in a call 6752 // to a vprintf function. For example: 6753 // 6754 // void 6755 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 6756 // va_list ap; 6757 // va_start(ap, fmt); 6758 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 6759 // ... 6760 // } 6761 if (HasVAListArg) { 6762 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 6763 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 6764 int PVIndex = PV->getFunctionScopeIndex() + 1; 6765 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 6766 // adjust for implicit parameter 6767 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 6768 if (MD->isInstance()) 6769 ++PVIndex; 6770 // We also check if the formats are compatible. 6771 // We can't pass a 'scanf' string to a 'printf' function. 
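              // (Illustrative: a parameter declared with
              //  __attribute__((format(scanf, 1, 2))) does not suppress the
              //  warning when it is forwarded to a printf-style callee.)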
6772               if (PVIndex == PVFormat->getFormatIdx() &&
6773                   Type == S.GetFormatStringType(PVFormat))
6774                 return SLCT_UncheckedLiteral;
6775             }
6776           }
6777         }
6778       }
6779     }
6780 
6781     return SLCT_NotALiteral;
6782   }
6783 
6784   case Stmt::CallExprClass:
6785   case Stmt::CXXMemberCallExprClass: {
6786     const CallExpr *CE = cast<CallExpr>(E);
6787     if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
6788       bool IsFirst = true;
6789       StringLiteralCheckType CommonResult;
6790       for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
6791         const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
6792         StringLiteralCheckType Result = checkFormatStringExpr(
6793             S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
6794             CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
6795             IgnoreStringsWithoutSpecifiers);
6796         if (IsFirst) {
6797           CommonResult = Result;
6798           IsFirst = false;
6799         }
6800       }
6801       if (!IsFirst)
6802         return CommonResult;
6803 
6804       if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
6805         unsigned BuiltinID = FD->getBuiltinID();
6806         if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
6807             BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
6808           const Expr *Arg = CE->getArg(0);
6809           return checkFormatStringExpr(S, Arg, Args,
6810                                        HasVAListArg, format_idx,
6811                                        firstDataArg, Type, CallType,
6812                                        InFunctionCall, CheckedVarArgs,
6813                                        UncoveredArg, Offset,
6814                                        IgnoreStringsWithoutSpecifiers);
6815         }
6816       }
6817     }
6818 
6819     return SLCT_NotALiteral;
6820   }
6821   case Stmt::ObjCMessageExprClass: {
6822     const auto *ME = cast<ObjCMessageExpr>(E);
6823     if (const auto *MD = ME->getMethodDecl()) {
6824       if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
6825         // As a special case heuristic, if we're using the method -[NSBundle
6826         // localizedStringForKey:value:table:], ignore any key strings that lack
6827         // format specifiers. The idea is that if the key doesn't have any
6828         // format specifiers then it's probably just a key to map to the
6829         // localized strings. If it does have format specifiers though, then it's
6830         // likely that the text of the key is the format string in the
6831         // programmer's language, and should be checked.
6832         const ObjCInterfaceDecl *IFace;
6833         if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
6834             IFace->getIdentifier()->isStr("NSBundle") &&
6835             MD->getSelector().isKeywordSelector(
6836                 {"localizedStringForKey", "value", "table"})) {
6837           IgnoreStringsWithoutSpecifiers = true;
6838         }
6839 
6840         const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
6841         return checkFormatStringExpr(
6842             S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
6843             CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
6844             IgnoreStringsWithoutSpecifiers);
6845       }
6846     }
6847 
6848     return SLCT_NotALiteral;
6849   }
6850   case Stmt::ObjCStringLiteralClass:
6851   case Stmt::StringLiteralClass: {
6852     const StringLiteral *StrE = nullptr;
6853 
6854     if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
6855       StrE = ObjCFExpr->getString();
6856     else
6857       StrE = cast<StringLiteral>(E);
6858 
6859     if (StrE) {
6860       if (Offset.isNegative() || Offset > StrE->getLength()) {
6861         // TODO: It would be better to have an explicit warning for out of
6862         // bounds literals.
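        // (e.g. printf("%d" + 10, x): the offset points past the end of the
        //  literal, so the string is simply not checked; illustrative only.)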
6863 return SLCT_NotALiteral; 6864 } 6865 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 6866 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 6867 firstDataArg, Type, InFunctionCall, CallType, 6868 CheckedVarArgs, UncoveredArg, 6869 IgnoreStringsWithoutSpecifiers); 6870 return SLCT_CheckedLiteral; 6871 } 6872 6873 return SLCT_NotALiteral; 6874 } 6875 case Stmt::BinaryOperatorClass: { 6876 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 6877 6878 // A string literal + an int offset is still a string literal. 6879 if (BinOp->isAdditiveOp()) { 6880 Expr::EvalResult LResult, RResult; 6881 6882 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 6883 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 6884 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 6885 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 6886 6887 if (LIsInt != RIsInt) { 6888 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 6889 6890 if (LIsInt) { 6891 if (BinOpKind == BO_Add) { 6892 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 6893 E = BinOp->getRHS(); 6894 goto tryAgain; 6895 } 6896 } else { 6897 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 6898 E = BinOp->getLHS(); 6899 goto tryAgain; 6900 } 6901 } 6902 } 6903 6904 return SLCT_NotALiteral; 6905 } 6906 case Stmt::UnaryOperatorClass: { 6907 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 6908 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 6909 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 6910 Expr::EvalResult IndexResult; 6911 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 6912 Expr::SE_NoSideEffects, 6913 S.isConstantEvaluated())) { 6914 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 6915 /*RHS is int*/ true); 6916 E = ASE->getBase(); 6917 goto tryAgain; 6918 } 6919 } 6920 6921 return SLCT_NotALiteral; 6922 } 6923 6924 default: 6925 return SLCT_NotALiteral; 6926 } 6927 } 6928 6929 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 6930 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 6931 .Case("scanf", FST_Scanf) 6932 .Cases("printf", "printf0", FST_Printf) 6933 .Cases("NSString", "CFString", FST_NSString) 6934 .Case("strftime", FST_Strftime) 6935 .Case("strfmon", FST_Strfmon) 6936 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 6937 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 6938 .Case("os_trace", FST_OSLog) 6939 .Case("os_log", FST_OSLog) 6940 .Default(FST_Unknown); 6941 } 6942 6943 /// CheckFormatArguments - Check calls to printf and scanf (and similar 6944 /// functions) for correct use of format strings. 6945 /// Returns true if a format string has been fully checked. 
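/// For example (illustrative), a call such as printf("%u", n) yields true here
/// because the literal can be fully checked, whereas printf(fmt, n) with a
/// non-literal fmt yields false.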
6946 bool Sema::CheckFormatArguments(const FormatAttr *Format, 6947 ArrayRef<const Expr *> Args, 6948 bool IsCXXMember, 6949 VariadicCallType CallType, 6950 SourceLocation Loc, SourceRange Range, 6951 llvm::SmallBitVector &CheckedVarArgs) { 6952 FormatStringInfo FSI; 6953 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 6954 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 6955 FSI.FirstDataArg, GetFormatStringType(Format), 6956 CallType, Loc, Range, CheckedVarArgs); 6957 return false; 6958 } 6959 6960 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 6961 bool HasVAListArg, unsigned format_idx, 6962 unsigned firstDataArg, FormatStringType Type, 6963 VariadicCallType CallType, 6964 SourceLocation Loc, SourceRange Range, 6965 llvm::SmallBitVector &CheckedVarArgs) { 6966 // CHECK: printf/scanf-like function is called with no format string. 6967 if (format_idx >= Args.size()) { 6968 Diag(Loc, diag::warn_missing_format_string) << Range; 6969 return false; 6970 } 6971 6972 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 6973 6974 // CHECK: format string is not a string literal. 6975 // 6976 // Dynamically generated format strings are difficult to 6977 // automatically vet at compile time. Requiring that format strings 6978 // are string literals: (1) permits the checking of format strings by 6979 // the compiler and thereby (2) can practically remove the source of 6980 // many format string exploits. 6981 6982 // Format string can be either ObjC string (e.g. @"%d") or 6983 // C string (e.g. "%d") 6984 // ObjC string uses the same format specifiers as C string, so we can use 6985 // the same format string checking logic for both ObjC and C strings. 6986 UncoveredArgHandler UncoveredArg; 6987 StringLiteralCheckType CT = 6988 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 6989 format_idx, firstDataArg, Type, CallType, 6990 /*IsFunctionCall*/ true, CheckedVarArgs, 6991 UncoveredArg, 6992 /*no string offset*/ llvm::APSInt(64, false) = 0); 6993 6994 // Generate a diagnostic where an uncovered argument is detected. 6995 if (UncoveredArg.hasUncoveredArg()) { 6996 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 6997 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 6998 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 6999 } 7000 7001 if (CT != SLCT_NotALiteral) 7002 // Literal format string found, check done! 7003 return CT == SLCT_CheckedLiteral; 7004 7005 // Strftime is particular as it always uses a single 'time' argument, 7006 // so it is safe to pass a non-literal string. 7007 if (Type == FST_Strftime) 7008 return false; 7009 7010 // Do not emit diag when the string param is a macro expansion and the 7011 // format is either NSString or CFString. This is a hack to prevent 7012 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7013 // which are usually used in place of NS and CF string literals. 7014 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7015 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7016 return false; 7017 7018 // If there are no arguments specified, warn with -Wformat-security, otherwise 7019 // warn only with -Wformat-nonliteral. 
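  // (Illustrative: printf(str) falls into the first bucket and may receive a
  //  "%s" fix-it below, while printf(str, x) falls into the second.)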
7020 if (Args.size() == firstDataArg) { 7021 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7022 << OrigFormatExpr->getSourceRange(); 7023 switch (Type) { 7024 default: 7025 break; 7026 case FST_Kprintf: 7027 case FST_FreeBSDKPrintf: 7028 case FST_Printf: 7029 Diag(FormatLoc, diag::note_format_security_fixit) 7030 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7031 break; 7032 case FST_NSString: 7033 Diag(FormatLoc, diag::note_format_security_fixit) 7034 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7035 break; 7036 } 7037 } else { 7038 Diag(FormatLoc, diag::warn_format_nonliteral) 7039 << OrigFormatExpr->getSourceRange(); 7040 } 7041 return false; 7042 } 7043 7044 namespace { 7045 7046 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7047 protected: 7048 Sema &S; 7049 const FormatStringLiteral *FExpr; 7050 const Expr *OrigFormatExpr; 7051 const Sema::FormatStringType FSType; 7052 const unsigned FirstDataArg; 7053 const unsigned NumDataArgs; 7054 const char *Beg; // Start of format string. 7055 const bool HasVAListArg; 7056 ArrayRef<const Expr *> Args; 7057 unsigned FormatIdx; 7058 llvm::SmallBitVector CoveredArgs; 7059 bool usesPositionalArgs = false; 7060 bool atFirstArg = true; 7061 bool inFunctionCall; 7062 Sema::VariadicCallType CallType; 7063 llvm::SmallBitVector &CheckedVarArgs; 7064 UncoveredArgHandler &UncoveredArg; 7065 7066 public: 7067 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7068 const Expr *origFormatExpr, 7069 const Sema::FormatStringType type, unsigned firstDataArg, 7070 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7071 ArrayRef<const Expr *> Args, unsigned formatIdx, 7072 bool inFunctionCall, Sema::VariadicCallType callType, 7073 llvm::SmallBitVector &CheckedVarArgs, 7074 UncoveredArgHandler &UncoveredArg) 7075 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7076 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7077 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7078 inFunctionCall(inFunctionCall), CallType(callType), 7079 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7080 CoveredArgs.resize(numDataArgs); 7081 CoveredArgs.reset(); 7082 } 7083 7084 void DoneProcessing(); 7085 7086 void HandleIncompleteSpecifier(const char *startSpecifier, 7087 unsigned specifierLen) override; 7088 7089 void HandleInvalidLengthModifier( 7090 const analyze_format_string::FormatSpecifier &FS, 7091 const analyze_format_string::ConversionSpecifier &CS, 7092 const char *startSpecifier, unsigned specifierLen, 7093 unsigned DiagID); 7094 7095 void HandleNonStandardLengthModifier( 7096 const analyze_format_string::FormatSpecifier &FS, 7097 const char *startSpecifier, unsigned specifierLen); 7098 7099 void HandleNonStandardConversionSpecifier( 7100 const analyze_format_string::ConversionSpecifier &CS, 7101 const char *startSpecifier, unsigned specifierLen); 7102 7103 void HandlePosition(const char *startPos, unsigned posLen) override; 7104 7105 void HandleInvalidPosition(const char *startSpecifier, 7106 unsigned specifierLen, 7107 analyze_format_string::PositionContext p) override; 7108 7109 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7110 7111 void HandleNullChar(const char *nullCharacter) override; 7112 7113 template <typename Range> 7114 static void 7115 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7116 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7117 bool IsStringLocation, 
Range StringRange, 7118 ArrayRef<FixItHint> Fixit = None); 7119 7120 protected: 7121 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7122 const char *startSpec, 7123 unsigned specifierLen, 7124 const char *csStart, unsigned csLen); 7125 7126 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7127 const char *startSpec, 7128 unsigned specifierLen); 7129 7130 SourceRange getFormatStringRange(); 7131 CharSourceRange getSpecifierRange(const char *startSpecifier, 7132 unsigned specifierLen); 7133 SourceLocation getLocationOfByte(const char *x); 7134 7135 const Expr *getDataArg(unsigned i) const; 7136 7137 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7138 const analyze_format_string::ConversionSpecifier &CS, 7139 const char *startSpecifier, unsigned specifierLen, 7140 unsigned argIndex); 7141 7142 template <typename Range> 7143 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7144 bool IsStringLocation, Range StringRange, 7145 ArrayRef<FixItHint> Fixit = None); 7146 }; 7147 7148 } // namespace 7149 7150 SourceRange CheckFormatHandler::getFormatStringRange() { 7151 return OrigFormatExpr->getSourceRange(); 7152 } 7153 7154 CharSourceRange CheckFormatHandler:: 7155 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7156 SourceLocation Start = getLocationOfByte(startSpecifier); 7157 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7158 7159 // Advance the end SourceLocation by one due to half-open ranges. 7160 End = End.getLocWithOffset(1); 7161 7162 return CharSourceRange::getCharRange(Start, End); 7163 } 7164 7165 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7166 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7167 S.getLangOpts(), S.Context.getTargetInfo()); 7168 } 7169 7170 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7171 unsigned specifierLen){ 7172 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7173 getLocationOfByte(startSpecifier), 7174 /*IsStringLocation*/true, 7175 getSpecifierRange(startSpecifier, specifierLen)); 7176 } 7177 7178 void CheckFormatHandler::HandleInvalidLengthModifier( 7179 const analyze_format_string::FormatSpecifier &FS, 7180 const analyze_format_string::ConversionSpecifier &CS, 7181 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7182 using namespace analyze_format_string; 7183 7184 const LengthModifier &LM = FS.getLengthModifier(); 7185 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7186 7187 // See if we know how to fix this length modifier. 
7188 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7189 if (FixedLM) { 7190 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7191 getLocationOfByte(LM.getStart()), 7192 /*IsStringLocation*/true, 7193 getSpecifierRange(startSpecifier, specifierLen)); 7194 7195 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7196 << FixedLM->toString() 7197 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7198 7199 } else { 7200 FixItHint Hint; 7201 if (DiagID == diag::warn_format_nonsensical_length) 7202 Hint = FixItHint::CreateRemoval(LMRange); 7203 7204 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7205 getLocationOfByte(LM.getStart()), 7206 /*IsStringLocation*/true, 7207 getSpecifierRange(startSpecifier, specifierLen), 7208 Hint); 7209 } 7210 } 7211 7212 void CheckFormatHandler::HandleNonStandardLengthModifier( 7213 const analyze_format_string::FormatSpecifier &FS, 7214 const char *startSpecifier, unsigned specifierLen) { 7215 using namespace analyze_format_string; 7216 7217 const LengthModifier &LM = FS.getLengthModifier(); 7218 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7219 7220 // See if we know how to fix this length modifier. 7221 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7222 if (FixedLM) { 7223 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7224 << LM.toString() << 0, 7225 getLocationOfByte(LM.getStart()), 7226 /*IsStringLocation*/true, 7227 getSpecifierRange(startSpecifier, specifierLen)); 7228 7229 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7230 << FixedLM->toString() 7231 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7232 7233 } else { 7234 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7235 << LM.toString() << 0, 7236 getLocationOfByte(LM.getStart()), 7237 /*IsStringLocation*/true, 7238 getSpecifierRange(startSpecifier, specifierLen)); 7239 } 7240 } 7241 7242 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7243 const analyze_format_string::ConversionSpecifier &CS, 7244 const char *startSpecifier, unsigned specifierLen) { 7245 using namespace analyze_format_string; 7246 7247 // See if we know how to fix this conversion specifier. 
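  // (For instance, a BSD-style specifier such as '%D' is flagged as
  //  non-standard here and, when a standard equivalent is known, a fix-it
  //  replacement is suggested; illustrative example only.)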
7248 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7249 if (FixedCS) { 7250 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7251 << CS.toString() << /*conversion specifier*/1, 7252 getLocationOfByte(CS.getStart()), 7253 /*IsStringLocation*/true, 7254 getSpecifierRange(startSpecifier, specifierLen)); 7255 7256 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7257 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7258 << FixedCS->toString() 7259 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7260 } else { 7261 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7262 << CS.toString() << /*conversion specifier*/1, 7263 getLocationOfByte(CS.getStart()), 7264 /*IsStringLocation*/true, 7265 getSpecifierRange(startSpecifier, specifierLen)); 7266 } 7267 } 7268 7269 void CheckFormatHandler::HandlePosition(const char *startPos, 7270 unsigned posLen) { 7271 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7272 getLocationOfByte(startPos), 7273 /*IsStringLocation*/true, 7274 getSpecifierRange(startPos, posLen)); 7275 } 7276 7277 void 7278 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7279 analyze_format_string::PositionContext p) { 7280 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7281 << (unsigned) p, 7282 getLocationOfByte(startPos), /*IsStringLocation*/true, 7283 getSpecifierRange(startPos, posLen)); 7284 } 7285 7286 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7287 unsigned posLen) { 7288 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7289 getLocationOfByte(startPos), 7290 /*IsStringLocation*/true, 7291 getSpecifierRange(startPos, posLen)); 7292 } 7293 7294 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7295 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7296 // The presence of a null character is likely an error. 7297 EmitFormatDiagnostic( 7298 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7299 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7300 getFormatStringRange()); 7301 } 7302 } 7303 7304 // Note that this may return NULL if there was an error parsing or building 7305 // one of the argument expressions. 7306 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7307 return Args[FirstDataArg + i]; 7308 } 7309 7310 void CheckFormatHandler::DoneProcessing() { 7311 // Does the number of data arguments exceed the number of 7312 // format conversions in the format string? 7313 if (!HasVAListArg) { 7314 // Find any arguments that weren't covered. 
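    // (e.g. printf("%d", x, y): y is never consumed by a conversion, so its
    //  index becomes the first uncovered argument; illustrative only.)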
7315     CoveredArgs.flip();
7316     signed notCoveredArg = CoveredArgs.find_first();
7317     if (notCoveredArg >= 0) {
7318       assert((unsigned)notCoveredArg < NumDataArgs);
7319       UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7320     } else {
7321       UncoveredArg.setAllCovered();
7322     }
7323   }
7324 }
7325 
7326 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7327                                    const Expr *ArgExpr) {
7328   assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7329          "Invalid state");
7330 
7331   if (!ArgExpr)
7332     return;
7333 
7334   SourceLocation Loc = ArgExpr->getBeginLoc();
7335 
7336   if (S.getSourceManager().isInSystemMacro(Loc))
7337     return;
7338 
7339   PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7340   for (auto E : DiagnosticExprs)
7341     PDiag << E->getSourceRange();
7342 
7343   CheckFormatHandler::EmitFormatDiagnostic(
7344       S, IsFunctionCall, DiagnosticExprs[0],
7345       PDiag, Loc, /*IsStringLocation*/false,
7346       DiagnosticExprs[0]->getSourceRange());
7347 }
7348 
7349 bool
7350 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7351                                                      SourceLocation Loc,
7352                                                      const char *startSpec,
7353                                                      unsigned specifierLen,
7354                                                      const char *csStart,
7355                                                      unsigned csLen) {
7356   bool keepGoing = true;
7357   if (argIndex < NumDataArgs) {
7358     // Consider the argument covered, even though the specifier doesn't
7359     // make sense.
7360     CoveredArgs.set(argIndex);
7361   }
7362   else {
7363     // If argIndex exceeds the number of data arguments we
7364     // don't issue a warning because that is just a cascade of warnings (and
7365     // they may have intended '%%' anyway). We don't want to continue processing
7366     // the format string after this point, however, as we will likely just get
7367     // gibberish when trying to match arguments.
7368     keepGoing = false;
7369   }
7370 
7371   StringRef Specifier(csStart, csLen);
7372 
7373   // If the specifier is non-printable, it could be the first byte of a UTF-8
7374   // sequence. In that case, print the UTF-8 code point. If not, print the byte's
7375   // hex value.
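  // (Illustrative: a stray U+20AC euro sign in place of a specifier is shown
  //  as "\u20ac", while a lone non-UTF-8 byte falls back to its "\xNN" form.)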
7376 std::string CodePointStr; 7377 if (!llvm::sys::locale::isPrint(*csStart)) { 7378 llvm::UTF32 CodePoint; 7379 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7380 const llvm::UTF8 *E = 7381 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7382 llvm::ConversionResult Result = 7383 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7384 7385 if (Result != llvm::conversionOK) { 7386 unsigned char FirstChar = *csStart; 7387 CodePoint = (llvm::UTF32)FirstChar; 7388 } 7389 7390 llvm::raw_string_ostream OS(CodePointStr); 7391 if (CodePoint < 256) 7392 OS << "\\x" << llvm::format("%02x", CodePoint); 7393 else if (CodePoint <= 0xFFFF) 7394 OS << "\\u" << llvm::format("%04x", CodePoint); 7395 else 7396 OS << "\\U" << llvm::format("%08x", CodePoint); 7397 OS.flush(); 7398 Specifier = CodePointStr; 7399 } 7400 7401 EmitFormatDiagnostic( 7402 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7403 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7404 7405 return keepGoing; 7406 } 7407 7408 void 7409 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7410 const char *startSpec, 7411 unsigned specifierLen) { 7412 EmitFormatDiagnostic( 7413 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7414 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7415 } 7416 7417 bool 7418 CheckFormatHandler::CheckNumArgs( 7419 const analyze_format_string::FormatSpecifier &FS, 7420 const analyze_format_string::ConversionSpecifier &CS, 7421 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7422 7423 if (argIndex >= NumDataArgs) { 7424 PartialDiagnostic PDiag = FS.usesPositionalArg() 7425 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7426 << (argIndex+1) << NumDataArgs) 7427 : S.PDiag(diag::warn_printf_insufficient_data_args); 7428 EmitFormatDiagnostic( 7429 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7430 getSpecifierRange(startSpecifier, specifierLen)); 7431 7432 // Since more arguments than conversion tokens are given, by extension 7433 // all arguments are covered, so mark this as so. 7434 UncoveredArg.setAllCovered(); 7435 return false; 7436 } 7437 return true; 7438 } 7439 7440 template<typename Range> 7441 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7442 SourceLocation Loc, 7443 bool IsStringLocation, 7444 Range StringRange, 7445 ArrayRef<FixItHint> FixIt) { 7446 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7447 Loc, IsStringLocation, StringRange, FixIt); 7448 } 7449 7450 /// If the format string is not within the function call, emit a note 7451 /// so that the function call and string are in diagnostic messages. 7452 /// 7453 /// \param InFunctionCall if true, the format string is within the function 7454 /// call and only one diagnostic message will be produced. Otherwise, an 7455 /// extra note will be emitted pointing to location of the format string. 7456 /// 7457 /// \param ArgumentExpr the expression that is passed as the format string 7458 /// argument in the function call. Used for getting locations when two 7459 /// diagnostics are emitted. 7460 /// 7461 /// \param PDiag the callee should already have provided any strings for the 7462 /// diagnostic message. This function only adds locations and fixits 7463 /// to diagnostics. 7464 /// 7465 /// \param Loc primary location for diagnostic. 
If two diagnostics are
7466 /// required, one will be at Loc and a new SourceLocation will be created for
7467 /// the other one.
7468 ///
7469 /// \param IsStringLocation if true, Loc points to the format string and should be
7470 /// used for the note. Otherwise, Loc points to the argument list and will
7471 /// be used with PDiag.
7472 ///
7473 /// \param StringRange some or all of the string to highlight. This is
7474 /// templated so it can accept either a CharSourceRange or a SourceRange.
7475 ///
7476 /// \param FixIt optional fix it hint for the format string.
7477 template <typename Range>
7478 void CheckFormatHandler::EmitFormatDiagnostic(
7479 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
7480 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
7481 Range StringRange, ArrayRef<FixItHint> FixIt) {
7482 if (InFunctionCall) {
7483 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
7484 D << StringRange;
7485 D << FixIt;
7486 } else {
7487 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
7488 << ArgumentExpr->getSourceRange();
7489
7490 const Sema::SemaDiagnosticBuilder &Note =
7491 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
7492 diag::note_format_string_defined);
7493
7494 Note << StringRange;
7495 Note << FixIt;
7496 }
7497 }
7498
7499 //===--- CHECK: Printf format string checking ------------------------------===//
7500
7501 namespace {
7502
7503 class CheckPrintfHandler : public CheckFormatHandler {
7504 public:
7505 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
7506 const Expr *origFormatExpr,
7507 const Sema::FormatStringType type, unsigned firstDataArg,
7508 unsigned numDataArgs, bool isObjC, const char *beg,
7509 bool hasVAListArg, ArrayRef<const Expr *> Args,
7510 unsigned formatIdx, bool inFunctionCall,
7511 Sema::VariadicCallType CallType,
7512 llvm::SmallBitVector &CheckedVarArgs,
7513 UncoveredArgHandler &UncoveredArg)
7514 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
7515 numDataArgs, beg, hasVAListArg, Args, formatIdx,
7516 inFunctionCall, CallType, CheckedVarArgs,
7517 UncoveredArg) {}
7518
7519 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
7520
7521 /// Returns true if '%@' specifiers are allowed in the format string.
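/// Illustrative example: '%@' is accepted in
/// [NSString stringWithFormat:@"%@", obj] and in os_log()/os_trace() format
/// strings, but is flagged in a plain printf("%@", obj) call.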
7522 bool allowsObjCArg() const { 7523 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7524 FSType == Sema::FST_OSTrace; 7525 } 7526 7527 bool HandleInvalidPrintfConversionSpecifier( 7528 const analyze_printf::PrintfSpecifier &FS, 7529 const char *startSpecifier, 7530 unsigned specifierLen) override; 7531 7532 void handleInvalidMaskType(StringRef MaskType) override; 7533 7534 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7535 const char *startSpecifier, 7536 unsigned specifierLen) override; 7537 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7538 const char *StartSpecifier, 7539 unsigned SpecifierLen, 7540 const Expr *E); 7541 7542 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7543 const char *startSpecifier, unsigned specifierLen); 7544 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7545 const analyze_printf::OptionalAmount &Amt, 7546 unsigned type, 7547 const char *startSpecifier, unsigned specifierLen); 7548 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7549 const analyze_printf::OptionalFlag &flag, 7550 const char *startSpecifier, unsigned specifierLen); 7551 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7552 const analyze_printf::OptionalFlag &ignoredFlag, 7553 const analyze_printf::OptionalFlag &flag, 7554 const char *startSpecifier, unsigned specifierLen); 7555 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7556 const Expr *E); 7557 7558 void HandleEmptyObjCModifierFlag(const char *startFlag, 7559 unsigned flagLen) override; 7560 7561 void HandleInvalidObjCModifierFlag(const char *startFlag, 7562 unsigned flagLen) override; 7563 7564 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7565 const char *flagsEnd, 7566 const char *conversionPosition) 7567 override; 7568 }; 7569 7570 } // namespace 7571 7572 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7573 const analyze_printf::PrintfSpecifier &FS, 7574 const char *startSpecifier, 7575 unsigned specifierLen) { 7576 const analyze_printf::PrintfConversionSpecifier &CS = 7577 FS.getConversionSpecifier(); 7578 7579 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7580 getLocationOfByte(CS.getStart()), 7581 startSpecifier, specifierLen, 7582 CS.getStart(), CS.getLength()); 7583 } 7584 7585 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7586 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7587 } 7588 7589 bool CheckPrintfHandler::HandleAmount( 7590 const analyze_format_string::OptionalAmount &Amt, 7591 unsigned k, const char *startSpecifier, 7592 unsigned specifierLen) { 7593 if (Amt.hasDataArgument()) { 7594 if (!HasVAListArg) { 7595 unsigned argIndex = Amt.getArgIndex(); 7596 if (argIndex >= NumDataArgs) { 7597 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7598 << k, 7599 getLocationOfByte(Amt.getStart()), 7600 /*IsStringLocation*/true, 7601 getSpecifierRange(startSpecifier, specifierLen)); 7602 // Don't do any more checking. We will just emit 7603 // spurious errors. 7604 return false; 7605 } 7606 7607 // Type check the data argument. It should be an 'int'. 7608 // Although not in conformance with C99, we also allow the argument to be 7609 // an 'unsigned int' as that is a reasonably safe case. GCC also 7610 // doesn't emit a warning for that case. 
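// Illustrative example: printf("%*d", 3.0, n) is flagged here because the '*'
// width consumes a 'double' where an 'int' (or 'unsigned int') is expected,
// whereas printf("%.*f", prec, x) with an int 'prec' is accepted.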
7611 CoveredArgs.set(argIndex); 7612 const Expr *Arg = getDataArg(argIndex); 7613 if (!Arg) 7614 return false; 7615 7616 QualType T = Arg->getType(); 7617 7618 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7619 assert(AT.isValid()); 7620 7621 if (!AT.matchesType(S.Context, T)) { 7622 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7623 << k << AT.getRepresentativeTypeName(S.Context) 7624 << T << Arg->getSourceRange(), 7625 getLocationOfByte(Amt.getStart()), 7626 /*IsStringLocation*/true, 7627 getSpecifierRange(startSpecifier, specifierLen)); 7628 // Don't do any more checking. We will just emit 7629 // spurious errors. 7630 return false; 7631 } 7632 } 7633 } 7634 return true; 7635 } 7636 7637 void CheckPrintfHandler::HandleInvalidAmount( 7638 const analyze_printf::PrintfSpecifier &FS, 7639 const analyze_printf::OptionalAmount &Amt, 7640 unsigned type, 7641 const char *startSpecifier, 7642 unsigned specifierLen) { 7643 const analyze_printf::PrintfConversionSpecifier &CS = 7644 FS.getConversionSpecifier(); 7645 7646 FixItHint fixit = 7647 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7648 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7649 Amt.getConstantLength())) 7650 : FixItHint(); 7651 7652 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7653 << type << CS.toString(), 7654 getLocationOfByte(Amt.getStart()), 7655 /*IsStringLocation*/true, 7656 getSpecifierRange(startSpecifier, specifierLen), 7657 fixit); 7658 } 7659 7660 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7661 const analyze_printf::OptionalFlag &flag, 7662 const char *startSpecifier, 7663 unsigned specifierLen) { 7664 // Warn about pointless flag with a fixit removal. 7665 const analyze_printf::PrintfConversionSpecifier &CS = 7666 FS.getConversionSpecifier(); 7667 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7668 << flag.toString() << CS.toString(), 7669 getLocationOfByte(flag.getPosition()), 7670 /*IsStringLocation*/true, 7671 getSpecifierRange(startSpecifier, specifierLen), 7672 FixItHint::CreateRemoval( 7673 getSpecifierRange(flag.getPosition(), 1))); 7674 } 7675 7676 void CheckPrintfHandler::HandleIgnoredFlag( 7677 const analyze_printf::PrintfSpecifier &FS, 7678 const analyze_printf::OptionalFlag &ignoredFlag, 7679 const analyze_printf::OptionalFlag &flag, 7680 const char *startSpecifier, 7681 unsigned specifierLen) { 7682 // Warn about ignored flag with a fixit removal. 7683 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7684 << ignoredFlag.toString() << flag.toString(), 7685 getLocationOfByte(ignoredFlag.getPosition()), 7686 /*IsStringLocation*/true, 7687 getSpecifierRange(startSpecifier, specifierLen), 7688 FixItHint::CreateRemoval( 7689 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7690 } 7691 7692 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7693 unsigned flagLen) { 7694 // Warn about an empty flag. 7695 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7696 getLocationOfByte(startFlag), 7697 /*IsStringLocation*/true, 7698 getSpecifierRange(startFlag, flagLen)); 7699 } 7700 7701 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7702 unsigned flagLen) { 7703 // Warn about an invalid flag. 
7704 auto Range = getSpecifierRange(startFlag, flagLen);
7705 StringRef flag(startFlag, flagLen);
7706 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
7707 getLocationOfByte(startFlag),
7708 /*IsStringLocation*/true,
7709 Range, FixItHint::CreateRemoval(Range));
7710 }
7711
7712 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
7713 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
7714 // Warn about using '[...]' without a '@' conversion.
7715 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
7716 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
7717 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
7718 getLocationOfByte(conversionPosition),
7719 /*IsStringLocation*/true,
7720 Range, FixItHint::CreateRemoval(Range));
7721 }
7722
7723 // Determines if the specified type is a C++ class or struct containing
7724 // a member with the specified name and kind (e.g. a CXXMethodDecl named
7725 // "c_str()").
7726 template<typename MemberKind>
7727 static llvm::SmallPtrSet<MemberKind*, 1>
7728 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
7729 const RecordType *RT = Ty->getAs<RecordType>();
7730 llvm::SmallPtrSet<MemberKind*, 1> Results;
7731
7732 if (!RT)
7733 return Results;
7734 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
7735 if (!RD || !RD->getDefinition())
7736 return Results;
7737
7738 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
7739 Sema::LookupMemberName);
7740 R.suppressDiagnostics();
7741
7742 // We just need to include all members of the right kind turned up by the
7743 // filter, at this point.
7744 if (S.LookupQualifiedName(R, RT->getDecl()))
7745 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
7746 NamedDecl *decl = (*I)->getUnderlyingDecl();
7747 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
7748 Results.insert(FK);
7749 }
7750 return Results;
7751 }
7752
7753 /// Check if we could call '.c_str()' on an object.
7754 ///
7755 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
7756 /// allow the call, or if it would be ambiguous).
7757 bool Sema::hasCStrMethod(const Expr *E) {
7758 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7759
7760 MethodSet Results =
7761 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
7762 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7763 MI != ME; ++MI)
7764 if ((*MI)->getMinRequiredArguments() == 0)
7765 return true;
7766 return false;
7767 }
7768
7769 // Check if a (w)string was passed when a (w)char* was needed, and offer a
7770 // better diagnostic if so. AT is assumed to be valid.
7771 // Returns true when a c_str() conversion method is found.
7772 bool CheckPrintfHandler::checkForCStrMembers(
7773 const analyze_printf::ArgType &AT, const Expr *E) {
7774 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
7775
7776 MethodSet Results =
7777 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
7778
7779 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
7780 MI != ME; ++MI) {
7781 const CXXMethodDecl *Method = *MI;
7782 if (Method->getMinRequiredArguments() == 0 &&
7783 AT.matchesType(S.Context, Method->getReturnType())) {
7784 // FIXME: Suggest parens if the expression needs them.
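// Illustrative example: for 'std::string S; printf("%s", S);' this emits a
// note suggesting 'S.c_str()', with an insertion fixit at the end of 'S'.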
7785 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 7786 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 7787 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 7788 return true; 7789 } 7790 } 7791 7792 return false; 7793 } 7794 7795 bool 7796 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 7797 &FS, 7798 const char *startSpecifier, 7799 unsigned specifierLen) { 7800 using namespace analyze_format_string; 7801 using namespace analyze_printf; 7802 7803 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 7804 7805 if (FS.consumesDataArgument()) { 7806 if (atFirstArg) { 7807 atFirstArg = false; 7808 usesPositionalArgs = FS.usesPositionalArg(); 7809 } 7810 else if (usesPositionalArgs != FS.usesPositionalArg()) { 7811 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 7812 startSpecifier, specifierLen); 7813 return false; 7814 } 7815 } 7816 7817 // First check if the field width, precision, and conversion specifier 7818 // have matching data arguments. 7819 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 7820 startSpecifier, specifierLen)) { 7821 return false; 7822 } 7823 7824 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 7825 startSpecifier, specifierLen)) { 7826 return false; 7827 } 7828 7829 if (!CS.consumesDataArgument()) { 7830 // FIXME: Technically specifying a precision or field width here 7831 // makes no sense. Worth issuing a warning at some point. 7832 return true; 7833 } 7834 7835 // Consume the argument. 7836 unsigned argIndex = FS.getArgIndex(); 7837 if (argIndex < NumDataArgs) { 7838 // The check to see if the argIndex is valid will come later. 7839 // We set the bit here because we may exit early from this 7840 // function if we encounter some other error. 7841 CoveredArgs.set(argIndex); 7842 } 7843 7844 // FreeBSD kernel extensions. 7845 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 7846 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 7847 // We need at least two arguments. 7848 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 7849 return false; 7850 7851 // Claim the second argument. 7852 CoveredArgs.set(argIndex + 1); 7853 7854 // Type check the first argument (int for %b, pointer for %D) 7855 const Expr *Ex = getDataArg(argIndex); 7856 const analyze_printf::ArgType &AT = 7857 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 7858 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 7859 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 7860 EmitFormatDiagnostic( 7861 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7862 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 7863 << false << Ex->getSourceRange(), 7864 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7865 getSpecifierRange(startSpecifier, specifierLen)); 7866 7867 // Type check the second argument (char * for both %b and %D) 7868 Ex = getDataArg(argIndex + 1); 7869 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 7870 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 7871 EmitFormatDiagnostic( 7872 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 7873 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 7874 << false << Ex->getSourceRange(), 7875 Ex->getBeginLoc(), /*IsStringLocation*/ false, 7876 getSpecifierRange(startSpecifier, specifierLen)); 7877 7878 return true; 7879 } 7880 7881 // Check for using an Objective-C specific conversion specifier 7882 // in a non-ObjC literal. 
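// Illustrative example: printf("%@", obj) is rejected here, since '%@' is only
// meaningful for NSString, os_log and os_trace format strings.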
7883 if (!allowsObjCArg() && CS.isObjCArg()) { 7884 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7885 specifierLen); 7886 } 7887 7888 // %P can only be used with os_log. 7889 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 7890 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7891 specifierLen); 7892 } 7893 7894 // %n is not allowed with os_log. 7895 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 7896 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 7897 getLocationOfByte(CS.getStart()), 7898 /*IsStringLocation*/ false, 7899 getSpecifierRange(startSpecifier, specifierLen)); 7900 7901 return true; 7902 } 7903 7904 // Only scalars are allowed for os_trace. 7905 if (FSType == Sema::FST_OSTrace && 7906 (CS.getKind() == ConversionSpecifier::PArg || 7907 CS.getKind() == ConversionSpecifier::sArg || 7908 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 7909 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 7910 specifierLen); 7911 } 7912 7913 // Check for use of public/private annotation outside of os_log(). 7914 if (FSType != Sema::FST_OSLog) { 7915 if (FS.isPublic().isSet()) { 7916 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7917 << "public", 7918 getLocationOfByte(FS.isPublic().getPosition()), 7919 /*IsStringLocation*/ false, 7920 getSpecifierRange(startSpecifier, specifierLen)); 7921 } 7922 if (FS.isPrivate().isSet()) { 7923 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 7924 << "private", 7925 getLocationOfByte(FS.isPrivate().getPosition()), 7926 /*IsStringLocation*/ false, 7927 getSpecifierRange(startSpecifier, specifierLen)); 7928 } 7929 } 7930 7931 // Check for invalid use of field width 7932 if (!FS.hasValidFieldWidth()) { 7933 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 7934 startSpecifier, specifierLen); 7935 } 7936 7937 // Check for invalid use of precision 7938 if (!FS.hasValidPrecision()) { 7939 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 7940 startSpecifier, specifierLen); 7941 } 7942 7943 // Precision is mandatory for %P specifier. 7944 if (CS.getKind() == ConversionSpecifier::PArg && 7945 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 7946 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 7947 getLocationOfByte(startSpecifier), 7948 /*IsStringLocation*/ false, 7949 getSpecifierRange(startSpecifier, specifierLen)); 7950 } 7951 7952 // Check each flag does not conflict with any other component. 
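// Illustrative examples: printf("%+s", s) draws a nonsensical-flag warning
// ('+' is meaningless with 's'), and printf("%0-d", n) draws an ignored-flag
// warning ('0' is ignored when '-' is present).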
7953 if (!FS.hasValidThousandsGroupingPrefix()) 7954 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 7955 if (!FS.hasValidLeadingZeros()) 7956 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 7957 if (!FS.hasValidPlusPrefix()) 7958 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 7959 if (!FS.hasValidSpacePrefix()) 7960 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 7961 if (!FS.hasValidAlternativeForm()) 7962 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 7963 if (!FS.hasValidLeftJustified()) 7964 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 7965 7966 // Check that flags are not ignored by another flag 7967 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 7968 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 7969 startSpecifier, specifierLen); 7970 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 7971 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 7972 startSpecifier, specifierLen); 7973 7974 // Check the length modifier is valid with the given conversion specifier. 7975 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 7976 S.getLangOpts())) 7977 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7978 diag::warn_format_nonsensical_length); 7979 else if (!FS.hasStandardLengthModifier()) 7980 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 7981 else if (!FS.hasStandardLengthConversionCombination()) 7982 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 7983 diag::warn_format_non_standard_conversion_spec); 7984 7985 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 7986 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 7987 7988 // The remaining checks depend on the data arguments. 7989 if (HasVAListArg) 7990 return true; 7991 7992 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 7993 return false; 7994 7995 const Expr *Arg = getDataArg(argIndex); 7996 if (!Arg) 7997 return true; 7998 7999 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8000 } 8001 8002 static bool requiresParensToAddCast(const Expr *E) { 8003 // FIXME: We should have a general way to reason about operator 8004 // precedence and whether parens are actually needed here. 8005 // Take care of a few common cases where they aren't. 
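// Illustrative example: a bare DeclRefExpr such as 'x' can be rewritten as
// '(NSInteger)x' directly, while 'x + 1' needs the parenthesized form
// '(NSInteger)(x + 1)'.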
8006 const Expr *Inside = E->IgnoreImpCasts(); 8007 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8008 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8009 8010 switch (Inside->getStmtClass()) { 8011 case Stmt::ArraySubscriptExprClass: 8012 case Stmt::CallExprClass: 8013 case Stmt::CharacterLiteralClass: 8014 case Stmt::CXXBoolLiteralExprClass: 8015 case Stmt::DeclRefExprClass: 8016 case Stmt::FloatingLiteralClass: 8017 case Stmt::IntegerLiteralClass: 8018 case Stmt::MemberExprClass: 8019 case Stmt::ObjCArrayLiteralClass: 8020 case Stmt::ObjCBoolLiteralExprClass: 8021 case Stmt::ObjCBoxedExprClass: 8022 case Stmt::ObjCDictionaryLiteralClass: 8023 case Stmt::ObjCEncodeExprClass: 8024 case Stmt::ObjCIvarRefExprClass: 8025 case Stmt::ObjCMessageExprClass: 8026 case Stmt::ObjCPropertyRefExprClass: 8027 case Stmt::ObjCStringLiteralClass: 8028 case Stmt::ObjCSubscriptRefExprClass: 8029 case Stmt::ParenExprClass: 8030 case Stmt::StringLiteralClass: 8031 case Stmt::UnaryOperatorClass: 8032 return false; 8033 default: 8034 return true; 8035 } 8036 } 8037 8038 static std::pair<QualType, StringRef> 8039 shouldNotPrintDirectly(const ASTContext &Context, 8040 QualType IntendedTy, 8041 const Expr *E) { 8042 // Use a 'while' to peel off layers of typedefs. 8043 QualType TyTy = IntendedTy; 8044 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8045 StringRef Name = UserTy->getDecl()->getName(); 8046 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8047 .Case("CFIndex", Context.getNSIntegerType()) 8048 .Case("NSInteger", Context.getNSIntegerType()) 8049 .Case("NSUInteger", Context.getNSUIntegerType()) 8050 .Case("SInt32", Context.IntTy) 8051 .Case("UInt32", Context.UnsignedIntTy) 8052 .Default(QualType()); 8053 8054 if (!CastTy.isNull()) 8055 return std::make_pair(CastTy, Name); 8056 8057 TyTy = UserTy->desugar(); 8058 } 8059 8060 // Strip parens if necessary. 8061 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8062 return shouldNotPrintDirectly(Context, 8063 PE->getSubExpr()->getType(), 8064 PE->getSubExpr()); 8065 8066 // If this is a conditional expression, then its result type is constructed 8067 // via usual arithmetic conversions and thus there might be no necessary 8068 // typedef sugar there. Recurse to operands to check for NSInteger & 8069 // Co. usage condition. 8070 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8071 QualType TrueTy, FalseTy; 8072 StringRef TrueName, FalseName; 8073 8074 std::tie(TrueTy, TrueName) = 8075 shouldNotPrintDirectly(Context, 8076 CO->getTrueExpr()->getType(), 8077 CO->getTrueExpr()); 8078 std::tie(FalseTy, FalseName) = 8079 shouldNotPrintDirectly(Context, 8080 CO->getFalseExpr()->getType(), 8081 CO->getFalseExpr()); 8082 8083 if (TrueTy == FalseTy) 8084 return std::make_pair(TrueTy, TrueName); 8085 else if (TrueTy.isNull()) 8086 return std::make_pair(FalseTy, FalseName); 8087 else if (FalseTy.isNull()) 8088 return std::make_pair(TrueTy, TrueName); 8089 } 8090 8091 return std::make_pair(QualType(), StringRef()); 8092 } 8093 8094 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8095 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8096 /// type do not count. 8097 static bool 8098 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8099 QualType From = ICE->getSubExpr()->getType(); 8100 QualType To = ICE->getType(); 8101 // It's an integer promotion if the destination type is the promoted 8102 // source type. 
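// Illustrative example: in 'short s; printf("%f", s);' the argument is
// promoted to 'int' by the variadic call, but the mismatch is reported
// against 'short', the type the user actually wrote.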
8103 if (ICE->getCastKind() == CK_IntegralCast && 8104 From->isPromotableIntegerType() && 8105 S.Context.getPromotedIntegerType(From) == To) 8106 return true; 8107 // Look through vector types, since we do default argument promotion for 8108 // those in OpenCL. 8109 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8110 From = VecTy->getElementType(); 8111 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8112 To = VecTy->getElementType(); 8113 // It's a floating promotion if the source type is a lower rank. 8114 return ICE->getCastKind() == CK_FloatingCast && 8115 S.Context.getFloatingTypeOrder(From, To) < 0; 8116 } 8117 8118 bool 8119 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8120 const char *StartSpecifier, 8121 unsigned SpecifierLen, 8122 const Expr *E) { 8123 using namespace analyze_format_string; 8124 using namespace analyze_printf; 8125 8126 // Now type check the data expression that matches the 8127 // format specifier. 8128 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8129 if (!AT.isValid()) 8130 return true; 8131 8132 QualType ExprTy = E->getType(); 8133 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8134 ExprTy = TET->getUnderlyingExpr()->getType(); 8135 } 8136 8137 // Diagnose attempts to print a boolean value as a character. Unlike other 8138 // -Wformat diagnostics, this is fine from a type perspective, but it still 8139 // doesn't make sense. 8140 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8141 E->isKnownToHaveBooleanValue()) { 8142 const CharSourceRange &CSR = 8143 getSpecifierRange(StartSpecifier, SpecifierLen); 8144 SmallString<4> FSString; 8145 llvm::raw_svector_ostream os(FSString); 8146 FS.toString(os); 8147 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8148 << FSString, 8149 E->getExprLoc(), false, CSR); 8150 return true; 8151 } 8152 8153 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8154 if (Match == analyze_printf::ArgType::Match) 8155 return true; 8156 8157 // Look through argument promotions for our error message's reported type. 8158 // This includes the integral and floating promotions, but excludes array 8159 // and function pointer decay (seeing that an argument intended to be a 8160 // string has type 'char [6]' is probably more confusing than 'char *') and 8161 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8162 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8163 if (isArithmeticArgumentPromotion(S, ICE)) { 8164 E = ICE->getSubExpr(); 8165 ExprTy = E->getType(); 8166 8167 // Check if we didn't match because of an implicit cast from a 'char' 8168 // or 'short' to an 'int'. This is done because printf is a varargs 8169 // function. 8170 if (ICE->getType() == S.Context.IntTy || 8171 ICE->getType() == S.Context.UnsignedIntTy) { 8172 // All further checking is done on the subexpression 8173 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8174 AT.matchesType(S.Context, ExprTy); 8175 if (ImplicitMatch == analyze_printf::ArgType::Match) 8176 return true; 8177 if (ImplicitMatch == ArgType::NoMatchPedantic || 8178 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8179 Match = ImplicitMatch; 8180 } 8181 } 8182 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8183 // Special case for 'a', which has type 'int' in C. 
8184 // Note, however, that we do /not/ want to treat multibyte constants like 8185 // 'MooV' as characters! This form is deprecated but still exists. 8186 if (ExprTy == S.Context.IntTy) 8187 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8188 ExprTy = S.Context.CharTy; 8189 } 8190 8191 // Look through enums to their underlying type. 8192 bool IsEnum = false; 8193 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8194 ExprTy = EnumTy->getDecl()->getIntegerType(); 8195 IsEnum = true; 8196 } 8197 8198 // %C in an Objective-C context prints a unichar, not a wchar_t. 8199 // If the argument is an integer of some kind, believe the %C and suggest 8200 // a cast instead of changing the conversion specifier. 8201 QualType IntendedTy = ExprTy; 8202 if (isObjCContext() && 8203 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8204 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8205 !ExprTy->isCharType()) { 8206 // 'unichar' is defined as a typedef of unsigned short, but we should 8207 // prefer using the typedef if it is visible. 8208 IntendedTy = S.Context.UnsignedShortTy; 8209 8210 // While we are here, check if the value is an IntegerLiteral that happens 8211 // to be within the valid range. 8212 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8213 const llvm::APInt &V = IL->getValue(); 8214 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8215 return true; 8216 } 8217 8218 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8219 Sema::LookupOrdinaryName); 8220 if (S.LookupName(Result, S.getCurScope())) { 8221 NamedDecl *ND = Result.getFoundDecl(); 8222 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8223 if (TD->getUnderlyingType() == IntendedTy) 8224 IntendedTy = S.Context.getTypedefType(TD); 8225 } 8226 } 8227 } 8228 8229 // Special-case some of Darwin's platform-independence types by suggesting 8230 // casts to primitive types that are known to be large enough. 8231 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8232 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8233 QualType CastTy; 8234 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8235 if (!CastTy.isNull()) { 8236 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8237 // (long in ASTContext). Only complain to pedants. 8238 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8239 (AT.isSizeT() || AT.isPtrdiffT()) && 8240 AT.matchesType(S.Context, CastTy)) 8241 Match = ArgType::NoMatchPedantic; 8242 IntendedTy = CastTy; 8243 ShouldNotPrintDirectly = true; 8244 } 8245 } 8246 8247 // We may be able to offer a FixItHint if it is a supported type. 
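// Illustrative example: for 'long l; printf("%d", l);' the specifier is
// fixable, so the mismatch warning carries a fixit replacing "%d" with "%ld".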
8248 PrintfSpecifier fixedFS = FS; 8249 bool Success = 8250 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8251 8252 if (Success) { 8253 // Get the fix string from the fixed format specifier 8254 SmallString<16> buf; 8255 llvm::raw_svector_ostream os(buf); 8256 fixedFS.toString(os); 8257 8258 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8259 8260 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8261 unsigned Diag; 8262 switch (Match) { 8263 case ArgType::Match: llvm_unreachable("expected non-matching"); 8264 case ArgType::NoMatchPedantic: 8265 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8266 break; 8267 case ArgType::NoMatchTypeConfusion: 8268 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8269 break; 8270 case ArgType::NoMatch: 8271 Diag = diag::warn_format_conversion_argument_type_mismatch; 8272 break; 8273 } 8274 8275 // In this case, the specifier is wrong and should be changed to match 8276 // the argument. 8277 EmitFormatDiagnostic(S.PDiag(Diag) 8278 << AT.getRepresentativeTypeName(S.Context) 8279 << IntendedTy << IsEnum << E->getSourceRange(), 8280 E->getBeginLoc(), 8281 /*IsStringLocation*/ false, SpecRange, 8282 FixItHint::CreateReplacement(SpecRange, os.str())); 8283 } else { 8284 // The canonical type for formatting this value is different from the 8285 // actual type of the expression. (This occurs, for example, with Darwin's 8286 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8287 // should be printed as 'long' for 64-bit compatibility.) 8288 // Rather than emitting a normal format/argument mismatch, we want to 8289 // add a cast to the recommended type (and correct the format string 8290 // if necessary). 8291 SmallString<16> CastBuf; 8292 llvm::raw_svector_ostream CastFix(CastBuf); 8293 CastFix << "("; 8294 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8295 CastFix << ")"; 8296 8297 SmallVector<FixItHint,4> Hints; 8298 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8299 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8300 8301 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8302 // If there's already a cast present, just replace it. 8303 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8304 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8305 8306 } else if (!requiresParensToAddCast(E)) { 8307 // If the expression has high enough precedence, 8308 // just write the C-style cast. 8309 Hints.push_back( 8310 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8311 } else { 8312 // Otherwise, add parens around the expression as well as the cast. 8313 CastFix << "("; 8314 Hints.push_back( 8315 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8316 8317 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8318 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8319 } 8320 8321 if (ShouldNotPrintDirectly) { 8322 // The expression has a type that should not be printed directly. 8323 // We extract the name from the typedef because we don't want to show 8324 // the underlying type in the diagnostic. 8325 StringRef Name; 8326 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8327 Name = TypedefTy->getDecl()->getName(); 8328 else 8329 Name = CastTyName; 8330 unsigned Diag = Match == ArgType::NoMatchPedantic 8331 ? 
diag::warn_format_argument_needs_cast_pedantic 8332 : diag::warn_format_argument_needs_cast; 8333 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8334 << E->getSourceRange(), 8335 E->getBeginLoc(), /*IsStringLocation=*/false, 8336 SpecRange, Hints); 8337 } else { 8338 // In this case, the expression could be printed using a different 8339 // specifier, but we've decided that the specifier is probably correct 8340 // and we should cast instead. Just use the normal warning message. 8341 EmitFormatDiagnostic( 8342 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8343 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8344 << E->getSourceRange(), 8345 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8346 } 8347 } 8348 } else { 8349 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8350 SpecifierLen); 8351 // Since the warning for passing non-POD types to variadic functions 8352 // was deferred until now, we emit a warning for non-POD 8353 // arguments here. 8354 switch (S.isValidVarArgType(ExprTy)) { 8355 case Sema::VAK_Valid: 8356 case Sema::VAK_ValidInCXX11: { 8357 unsigned Diag; 8358 switch (Match) { 8359 case ArgType::Match: llvm_unreachable("expected non-matching"); 8360 case ArgType::NoMatchPedantic: 8361 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8362 break; 8363 case ArgType::NoMatchTypeConfusion: 8364 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8365 break; 8366 case ArgType::NoMatch: 8367 Diag = diag::warn_format_conversion_argument_type_mismatch; 8368 break; 8369 } 8370 8371 EmitFormatDiagnostic( 8372 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8373 << IsEnum << CSR << E->getSourceRange(), 8374 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8375 break; 8376 } 8377 case Sema::VAK_Undefined: 8378 case Sema::VAK_MSVCUndefined: 8379 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8380 << S.getLangOpts().CPlusPlus11 << ExprTy 8381 << CallType 8382 << AT.getRepresentativeTypeName(S.Context) << CSR 8383 << E->getSourceRange(), 8384 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8385 checkForCStrMembers(AT, E); 8386 break; 8387 8388 case Sema::VAK_Invalid: 8389 if (ExprTy->isObjCObjectType()) 8390 EmitFormatDiagnostic( 8391 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8392 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8393 << AT.getRepresentativeTypeName(S.Context) << CSR 8394 << E->getSourceRange(), 8395 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8396 else 8397 // FIXME: If this is an initializer list, suggest removing the braces 8398 // or inserting a cast to the target type. 
8399 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
8400 << isa<InitListExpr>(E) << ExprTy << CallType
8401 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
8402 break;
8403 }
8404
8405 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
8406 "format string specifier index out of range");
8407 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
8408 }
8409
8410 return true;
8411 }
8412
8413 //===--- CHECK: Scanf format string checking ------------------------------===//
8414
8415 namespace {
8416
8417 class CheckScanfHandler : public CheckFormatHandler {
8418 public:
8419 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
8420 const Expr *origFormatExpr, Sema::FormatStringType type,
8421 unsigned firstDataArg, unsigned numDataArgs,
8422 const char *beg, bool hasVAListArg,
8423 ArrayRef<const Expr *> Args, unsigned formatIdx,
8424 bool inFunctionCall, Sema::VariadicCallType CallType,
8425 llvm::SmallBitVector &CheckedVarArgs,
8426 UncoveredArgHandler &UncoveredArg)
8427 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
8428 numDataArgs, beg, hasVAListArg, Args, formatIdx,
8429 inFunctionCall, CallType, CheckedVarArgs,
8430 UncoveredArg) {}
8431
8432 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
8433 const char *startSpecifier,
8434 unsigned specifierLen) override;
8435
8436 bool HandleInvalidScanfConversionSpecifier(
8437 const analyze_scanf::ScanfSpecifier &FS,
8438 const char *startSpecifier,
8439 unsigned specifierLen) override;
8440
8441 void HandleIncompleteScanList(const char *start, const char *end) override;
8442 };
8443
8444 } // namespace
8445
8446 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
8447 const char *end) {
8448 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
8449 getLocationOfByte(end), /*IsStringLocation*/true,
8450 getSpecifierRange(start, end - start));
8451 }
8452
8453 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
8454 const analyze_scanf::ScanfSpecifier &FS,
8455 const char *startSpecifier,
8456 unsigned specifierLen) {
8457 const analyze_scanf::ScanfConversionSpecifier &CS =
8458 FS.getConversionSpecifier();
8459
8460 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
8461 getLocationOfByte(CS.getStart()),
8462 startSpecifier, specifierLen,
8463 CS.getStart(), CS.getLength());
8464 }
8465
8466 bool CheckScanfHandler::HandleScanfSpecifier(
8467 const analyze_scanf::ScanfSpecifier &FS,
8468 const char *startSpecifier,
8469 unsigned specifierLen) {
8470 using namespace analyze_scanf;
8471 using namespace analyze_format_string;
8472
8473 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
8474
8475 // Handle case where '%' and '*' don't consume an argument. These shouldn't
8476 // be used to decide if we are using positional arguments consistently.
8477 if (FS.consumesDataArgument()) {
8478 if (atFirstArg) {
8479 atFirstArg = false;
8480 usesPositionalArgs = FS.usesPositionalArg();
8481 }
8482 else if (usesPositionalArgs != FS.usesPositionalArg()) {
8483 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
8484 startSpecifier, specifierLen);
8485 return false;
8486 }
8487 }
8488
8489 // Check that the field width is non-zero.
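// Illustrative example: scanf("%0d", &n) is flagged, since a zero field width
// never matches anything; the fixit removes the "0".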
8490 const OptionalAmount &Amt = FS.getFieldWidth(); 8491 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 8492 if (Amt.getConstantAmount() == 0) { 8493 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 8494 Amt.getConstantLength()); 8495 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 8496 getLocationOfByte(Amt.getStart()), 8497 /*IsStringLocation*/true, R, 8498 FixItHint::CreateRemoval(R)); 8499 } 8500 } 8501 8502 if (!FS.consumesDataArgument()) { 8503 // FIXME: Technically specifying a precision or field width here 8504 // makes no sense. Worth issuing a warning at some point. 8505 return true; 8506 } 8507 8508 // Consume the argument. 8509 unsigned argIndex = FS.getArgIndex(); 8510 if (argIndex < NumDataArgs) { 8511 // The check to see if the argIndex is valid will come later. 8512 // We set the bit here because we may exit early from this 8513 // function if we encounter some other error. 8514 CoveredArgs.set(argIndex); 8515 } 8516 8517 // Check the length modifier is valid with the given conversion specifier. 8518 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8519 S.getLangOpts())) 8520 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8521 diag::warn_format_nonsensical_length); 8522 else if (!FS.hasStandardLengthModifier()) 8523 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8524 else if (!FS.hasStandardLengthConversionCombination()) 8525 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8526 diag::warn_format_non_standard_conversion_spec); 8527 8528 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8529 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8530 8531 // The remaining checks depend on the data arguments. 8532 if (HasVAListArg) 8533 return true; 8534 8535 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8536 return false; 8537 8538 // Check that the argument type matches the format specifier. 8539 const Expr *Ex = getDataArg(argIndex); 8540 if (!Ex) 8541 return true; 8542 8543 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8544 8545 if (!AT.isValid()) { 8546 return true; 8547 } 8548 8549 analyze_format_string::ArgType::MatchKind Match = 8550 AT.matchesType(S.Context, Ex->getType()); 8551 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8552 if (Match == analyze_format_string::ArgType::Match) 8553 return true; 8554 8555 ScanfSpecifier fixedFS = FS; 8556 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8557 S.getLangOpts(), S.Context); 8558 8559 unsigned Diag = 8560 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8561 : diag::warn_format_conversion_argument_type_mismatch; 8562 8563 if (Success) { 8564 // Get the fix string from the fixed format specifier. 
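// Illustrative example: for 'long l; scanf("%d", &l);' the fixed specifier is
// "%ld", offered as a replacement fixit below.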
8565 SmallString<128> buf; 8566 llvm::raw_svector_ostream os(buf); 8567 fixedFS.toString(os); 8568 8569 EmitFormatDiagnostic( 8570 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8571 << Ex->getType() << false << Ex->getSourceRange(), 8572 Ex->getBeginLoc(), 8573 /*IsStringLocation*/ false, 8574 getSpecifierRange(startSpecifier, specifierLen), 8575 FixItHint::CreateReplacement( 8576 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8577 } else { 8578 EmitFormatDiagnostic(S.PDiag(Diag) 8579 << AT.getRepresentativeTypeName(S.Context) 8580 << Ex->getType() << false << Ex->getSourceRange(), 8581 Ex->getBeginLoc(), 8582 /*IsStringLocation*/ false, 8583 getSpecifierRange(startSpecifier, specifierLen)); 8584 } 8585 8586 return true; 8587 } 8588 8589 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8590 const Expr *OrigFormatExpr, 8591 ArrayRef<const Expr *> Args, 8592 bool HasVAListArg, unsigned format_idx, 8593 unsigned firstDataArg, 8594 Sema::FormatStringType Type, 8595 bool inFunctionCall, 8596 Sema::VariadicCallType CallType, 8597 llvm::SmallBitVector &CheckedVarArgs, 8598 UncoveredArgHandler &UncoveredArg, 8599 bool IgnoreStringsWithoutSpecifiers) { 8600 // CHECK: is the format string a wide literal? 8601 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8602 CheckFormatHandler::EmitFormatDiagnostic( 8603 S, inFunctionCall, Args[format_idx], 8604 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8605 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8606 return; 8607 } 8608 8609 // Str - The format string. NOTE: this is NOT null-terminated! 8610 StringRef StrRef = FExpr->getString(); 8611 const char *Str = StrRef.data(); 8612 // Account for cases where the string literal is truncated in a declaration. 8613 const ConstantArrayType *T = 8614 S.Context.getAsConstantArrayType(FExpr->getType()); 8615 assert(T && "String literal not of constant array type!"); 8616 size_t TypeSize = T->getSize().getZExtValue(); 8617 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8618 const unsigned numDataArgs = Args.size() - firstDataArg; 8619 8620 if (IgnoreStringsWithoutSpecifiers && 8621 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 8622 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 8623 return; 8624 8625 // Emit a warning if the string literal is truncated and does not contain an 8626 // embedded null character. 8627 if (TypeSize <= StrRef.size() && 8628 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8629 CheckFormatHandler::EmitFormatDiagnostic( 8630 S, inFunctionCall, Args[format_idx], 8631 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8632 FExpr->getBeginLoc(), 8633 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8634 return; 8635 } 8636 8637 // CHECK: empty format string? 
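// Illustrative example: printf("", n) has data arguments but nothing to
// format, which is almost certainly a mistake.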
8638 if (StrLen == 0 && numDataArgs > 0) {
8639 CheckFormatHandler::EmitFormatDiagnostic(
8640 S, inFunctionCall, Args[format_idx],
8641 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
8642 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8643 return;
8644 }
8645
8646 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8647 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
8648 Type == Sema::FST_OSTrace) {
8649 CheckPrintfHandler H(
8650 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8651 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8652 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8653 CheckedVarArgs, UncoveredArg);
8654
8655 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8656 S.getLangOpts(),
8657 S.Context.getTargetInfo(),
8658 Type == Sema::FST_FreeBSDKPrintf))
8659 H.DoneProcessing();
8660 } else if (Type == Sema::FST_Scanf) {
8661 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8662 numDataArgs, Str, HasVAListArg, Args, format_idx,
8663 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8664
8665 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8666 S.getLangOpts(),
8667 S.Context.getTargetInfo()))
8668 H.DoneProcessing();
8669 } // TODO: handle other formats
8670 }
8671
8672 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8673 // Str - The format string. NOTE: this is NOT null-terminated!
8674 StringRef StrRef = FExpr->getString();
8675 const char *Str = StrRef.data();
8676 // Account for cases where the string literal is truncated in a declaration.
8677 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8678 assert(T && "String literal not of constant array type!");
8679 size_t TypeSize = T->getSize().getZExtValue();
8680 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8681 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8682 getLangOpts(),
8683 Context.getTargetInfo());
8684 }
8685
8686 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8687
8688 // Returns the related absolute value function that is larger, or 0 if one
8689 // does not exist.
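// For illustration: the chains walked here are abs -> labs -> llabs,
// fabsf -> fabs -> fabsl, and cabsf -> cabs -> cabsl (plus their __builtin_
// counterparts).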
8690 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8691 switch (AbsFunction) { 8692 default: 8693 return 0; 8694 8695 case Builtin::BI__builtin_abs: 8696 return Builtin::BI__builtin_labs; 8697 case Builtin::BI__builtin_labs: 8698 return Builtin::BI__builtin_llabs; 8699 case Builtin::BI__builtin_llabs: 8700 return 0; 8701 8702 case Builtin::BI__builtin_fabsf: 8703 return Builtin::BI__builtin_fabs; 8704 case Builtin::BI__builtin_fabs: 8705 return Builtin::BI__builtin_fabsl; 8706 case Builtin::BI__builtin_fabsl: 8707 return 0; 8708 8709 case Builtin::BI__builtin_cabsf: 8710 return Builtin::BI__builtin_cabs; 8711 case Builtin::BI__builtin_cabs: 8712 return Builtin::BI__builtin_cabsl; 8713 case Builtin::BI__builtin_cabsl: 8714 return 0; 8715 8716 case Builtin::BIabs: 8717 return Builtin::BIlabs; 8718 case Builtin::BIlabs: 8719 return Builtin::BIllabs; 8720 case Builtin::BIllabs: 8721 return 0; 8722 8723 case Builtin::BIfabsf: 8724 return Builtin::BIfabs; 8725 case Builtin::BIfabs: 8726 return Builtin::BIfabsl; 8727 case Builtin::BIfabsl: 8728 return 0; 8729 8730 case Builtin::BIcabsf: 8731 return Builtin::BIcabs; 8732 case Builtin::BIcabs: 8733 return Builtin::BIcabsl; 8734 case Builtin::BIcabsl: 8735 return 0; 8736 } 8737 } 8738 8739 // Returns the argument type of the absolute value function. 8740 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 8741 unsigned AbsType) { 8742 if (AbsType == 0) 8743 return QualType(); 8744 8745 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 8746 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 8747 if (Error != ASTContext::GE_None) 8748 return QualType(); 8749 8750 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 8751 if (!FT) 8752 return QualType(); 8753 8754 if (FT->getNumParams() != 1) 8755 return QualType(); 8756 8757 return FT->getParamType(0); 8758 } 8759 8760 // Returns the best absolute value function, or zero, based on type and 8761 // current absolute value function. 8762 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 8763 unsigned AbsFunctionKind) { 8764 unsigned BestKind = 0; 8765 uint64_t ArgSize = Context.getTypeSize(ArgType); 8766 for (unsigned Kind = AbsFunctionKind; Kind != 0; 8767 Kind = getLargerAbsoluteValueFunction(Kind)) { 8768 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 8769 if (Context.getTypeSize(ParamType) >= ArgSize) { 8770 if (BestKind == 0) 8771 BestKind = Kind; 8772 else if (Context.hasSameType(ParamType, ArgType)) { 8773 BestKind = Kind; 8774 break; 8775 } 8776 } 8777 } 8778 return BestKind; 8779 } 8780 8781 enum AbsoluteValueKind { 8782 AVK_Integer, 8783 AVK_Floating, 8784 AVK_Complex 8785 }; 8786 8787 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 8788 if (T->isIntegralOrEnumerationType()) 8789 return AVK_Integer; 8790 if (T->isRealFloatingType()) 8791 return AVK_Floating; 8792 if (T->isAnyComplexType()) 8793 return AVK_Complex; 8794 8795 llvm_unreachable("Type not integer, floating, or complex"); 8796 } 8797 8798 // Changes the absolute value function to a different type. Preserves whether 8799 // the function is a builtin. 
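// Illustrative example: changeAbsFunction(Builtin::BIfabsf, AVK_Integer)
// yields Builtin::BIabs; getBestAbsFunction can then widen that to labs or
// llabs if the argument needs it.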
8800 static unsigned changeAbsFunction(unsigned AbsKind, 8801 AbsoluteValueKind ValueKind) { 8802 switch (ValueKind) { 8803 case AVK_Integer: 8804 switch (AbsKind) { 8805 default: 8806 return 0; 8807 case Builtin::BI__builtin_fabsf: 8808 case Builtin::BI__builtin_fabs: 8809 case Builtin::BI__builtin_fabsl: 8810 case Builtin::BI__builtin_cabsf: 8811 case Builtin::BI__builtin_cabs: 8812 case Builtin::BI__builtin_cabsl: 8813 return Builtin::BI__builtin_abs; 8814 case Builtin::BIfabsf: 8815 case Builtin::BIfabs: 8816 case Builtin::BIfabsl: 8817 case Builtin::BIcabsf: 8818 case Builtin::BIcabs: 8819 case Builtin::BIcabsl: 8820 return Builtin::BIabs; 8821 } 8822 case AVK_Floating: 8823 switch (AbsKind) { 8824 default: 8825 return 0; 8826 case Builtin::BI__builtin_abs: 8827 case Builtin::BI__builtin_labs: 8828 case Builtin::BI__builtin_llabs: 8829 case Builtin::BI__builtin_cabsf: 8830 case Builtin::BI__builtin_cabs: 8831 case Builtin::BI__builtin_cabsl: 8832 return Builtin::BI__builtin_fabsf; 8833 case Builtin::BIabs: 8834 case Builtin::BIlabs: 8835 case Builtin::BIllabs: 8836 case Builtin::BIcabsf: 8837 case Builtin::BIcabs: 8838 case Builtin::BIcabsl: 8839 return Builtin::BIfabsf; 8840 } 8841 case AVK_Complex: 8842 switch (AbsKind) { 8843 default: 8844 return 0; 8845 case Builtin::BI__builtin_abs: 8846 case Builtin::BI__builtin_labs: 8847 case Builtin::BI__builtin_llabs: 8848 case Builtin::BI__builtin_fabsf: 8849 case Builtin::BI__builtin_fabs: 8850 case Builtin::BI__builtin_fabsl: 8851 return Builtin::BI__builtin_cabsf; 8852 case Builtin::BIabs: 8853 case Builtin::BIlabs: 8854 case Builtin::BIllabs: 8855 case Builtin::BIfabsf: 8856 case Builtin::BIfabs: 8857 case Builtin::BIfabsl: 8858 return Builtin::BIcabsf; 8859 } 8860 } 8861 llvm_unreachable("Unable to convert function"); 8862 } 8863 8864 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 8865 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 8866 if (!FnInfo) 8867 return 0; 8868 8869 switch (FDecl->getBuiltinID()) { 8870 default: 8871 return 0; 8872 case Builtin::BI__builtin_abs: 8873 case Builtin::BI__builtin_fabs: 8874 case Builtin::BI__builtin_fabsf: 8875 case Builtin::BI__builtin_fabsl: 8876 case Builtin::BI__builtin_labs: 8877 case Builtin::BI__builtin_llabs: 8878 case Builtin::BI__builtin_cabs: 8879 case Builtin::BI__builtin_cabsf: 8880 case Builtin::BI__builtin_cabsl: 8881 case Builtin::BIabs: 8882 case Builtin::BIlabs: 8883 case Builtin::BIllabs: 8884 case Builtin::BIfabs: 8885 case Builtin::BIfabsf: 8886 case Builtin::BIfabsl: 8887 case Builtin::BIcabs: 8888 case Builtin::BIcabsf: 8889 case Builtin::BIcabsl: 8890 return FDecl->getBuiltinID(); 8891 } 8892 llvm_unreachable("Unknown Builtin type"); 8893 } 8894 8895 // If the replacement is valid, emit a note with replacement function. 8896 // Additionally, suggest including the proper header if not already included. 
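// Illustrative example: in C++, 'double d; ... abs(d);' gets a note suggesting
// 'std::abs', plus a hint to include <cmath> if no suitable std::abs overload
// is already visible.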
8897 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 8898 unsigned AbsKind, QualType ArgType) { 8899 bool EmitHeaderHint = true; 8900 const char *HeaderName = nullptr; 8901 const char *FunctionName = nullptr; 8902 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 8903 FunctionName = "std::abs"; 8904 if (ArgType->isIntegralOrEnumerationType()) { 8905 HeaderName = "cstdlib"; 8906 } else if (ArgType->isRealFloatingType()) { 8907 HeaderName = "cmath"; 8908 } else { 8909 llvm_unreachable("Invalid Type"); 8910 } 8911 8912 // Lookup all std::abs 8913 if (NamespaceDecl *Std = S.getStdNamespace()) { 8914 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 8915 R.suppressDiagnostics(); 8916 S.LookupQualifiedName(R, Std); 8917 8918 for (const auto *I : R) { 8919 const FunctionDecl *FDecl = nullptr; 8920 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 8921 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 8922 } else { 8923 FDecl = dyn_cast<FunctionDecl>(I); 8924 } 8925 if (!FDecl) 8926 continue; 8927 8928 // Found std::abs(), check that they are the right ones. 8929 if (FDecl->getNumParams() != 1) 8930 continue; 8931 8932 // Check that the parameter type can handle the argument. 8933 QualType ParamType = FDecl->getParamDecl(0)->getType(); 8934 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 8935 S.Context.getTypeSize(ArgType) <= 8936 S.Context.getTypeSize(ParamType)) { 8937 // Found a function, don't need the header hint. 8938 EmitHeaderHint = false; 8939 break; 8940 } 8941 } 8942 } 8943 } else { 8944 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 8945 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 8946 8947 if (HeaderName) { 8948 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 8949 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 8950 R.suppressDiagnostics(); 8951 S.LookupName(R, S.getCurScope()); 8952 8953 if (R.isSingleResult()) { 8954 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 8955 if (FD && FD->getBuiltinID() == AbsKind) { 8956 EmitHeaderHint = false; 8957 } else { 8958 return; 8959 } 8960 } else if (!R.empty()) { 8961 return; 8962 } 8963 } 8964 } 8965 8966 S.Diag(Loc, diag::note_replace_abs_function) 8967 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 8968 8969 if (!HeaderName) 8970 return; 8971 8972 if (!EmitHeaderHint) 8973 return; 8974 8975 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 8976 << FunctionName; 8977 } 8978 8979 template <std::size_t StrLen> 8980 static bool IsStdFunction(const FunctionDecl *FDecl, 8981 const char (&Str)[StrLen]) { 8982 if (!FDecl) 8983 return false; 8984 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 8985 return false; 8986 if (!FDecl->isInStdNamespace()) 8987 return false; 8988 8989 return true; 8990 } 8991 8992 // Warn when using the wrong abs() function. 8993 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 8994 const FunctionDecl *FDecl) { 8995 if (Call->getNumArgs() != 1) 8996 return; 8997 8998 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 8999 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9000 if (AbsKind == 0 && !IsStdAbs) 9001 return; 9002 9003 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9004 QualType ParamType = Call->getArg(0)->getType(); 9005 9006 // Unsigned types cannot be negative. Suggest removing the absolute value 9007 // function call. 
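// Illustrative example: 'unsigned u; ... abs(u);' is always a no-op, so the
// warning comes with a fixit that removes the call to 'abs', leaving '(u)'.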
  if (ArgType->isUnsignedIntegerType()) {
    const char *FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious; the user
  // probably meant to index into an array, dereference a pointer, call a
  // function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one-template-argument, two-parameter form of std::max.
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto *ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that the template type argument is an unsigned integer.
  const auto &TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
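  // Illustrative example (editorial addition; 'n' is a hypothetical variable):
  //   unsigned n = ...;
  //   std::max(0u, n);   // always n
  //   std::max(n, 0u);   // always n
  // The check below only fires when exactly one of the two arguments is a
  // literal zero.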
9095 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9096 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9097 if (!MTE) return false; 9098 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 9099 if (!Num) return false; 9100 if (Num->getValue() != 0) return false; 9101 return true; 9102 }; 9103 9104 const Expr *FirstArg = Call->getArg(0); 9105 const Expr *SecondArg = Call->getArg(1); 9106 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9107 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9108 9109 // Only warn when exactly one argument is zero. 9110 if (IsFirstArgZero == IsSecondArgZero) return; 9111 9112 SourceRange FirstRange = FirstArg->getSourceRange(); 9113 SourceRange SecondRange = SecondArg->getSourceRange(); 9114 9115 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9116 9117 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9118 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9119 9120 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9121 SourceRange RemovalRange; 9122 if (IsFirstArgZero) { 9123 RemovalRange = SourceRange(FirstRange.getBegin(), 9124 SecondRange.getBegin().getLocWithOffset(-1)); 9125 } else { 9126 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9127 SecondRange.getEnd()); 9128 } 9129 9130 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9131 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9132 << FixItHint::CreateRemoval(RemovalRange); 9133 } 9134 9135 //===--- CHECK: Standard memory functions ---------------------------------===// 9136 9137 /// Takes the expression passed to the size_t parameter of functions 9138 /// such as memcmp, strncat, etc and warns if it's a comparison. 9139 /// 9140 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9141 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9142 IdentifierInfo *FnName, 9143 SourceLocation FnLoc, 9144 SourceLocation RParenLoc) { 9145 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9146 if (!Size) 9147 return false; 9148 9149 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9150 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9151 return false; 9152 9153 SourceRange SizeRange = Size->getSourceRange(); 9154 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9155 << SizeRange << FnName; 9156 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9157 << FnName 9158 << FixItHint::CreateInsertion( 9159 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9160 << FixItHint::CreateRemoval(RParenLoc); 9161 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9162 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9163 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9164 ")"); 9165 9166 return true; 9167 } 9168 9169 /// Determine whether the given type is or contains a dynamic class type 9170 /// (e.g., whether it has a vtable). 9171 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9172 bool &IsContained) { 9173 // Look through array types while ignoring qualifiers. 9174 const Type *Ty = T->getBaseElementTypeUnsafe(); 9175 IsContained = false; 9176 9177 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9178 RD = RD ? RD->getDefinition() : nullptr; 9179 if (!RD || RD->isInvalidDecl()) 9180 return nullptr; 9181 9182 if (RD->isDynamicClass()) 9183 return RD; 9184 9185 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9186 // It's impossible for a class to transitively contain itself by value, so 9187 // infinite recursion is impossible. 9188 for (auto *FD : RD->fields()) { 9189 bool SubContained; 9190 if (const CXXRecordDecl *ContainedRD = 9191 getContainedDynamicClass(FD->getType(), SubContained)) { 9192 IsContained = true; 9193 return ContainedRD; 9194 } 9195 } 9196 9197 return nullptr; 9198 } 9199 9200 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9201 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9202 if (Unary->getKind() == UETT_SizeOf) 9203 return Unary; 9204 return nullptr; 9205 } 9206 9207 /// If E is a sizeof expression, returns its argument expression, 9208 /// otherwise returns NULL. 9209 static const Expr *getSizeOfExprArg(const Expr *E) { 9210 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9211 if (!SizeOf->isArgumentType()) 9212 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9213 return nullptr; 9214 } 9215 9216 /// If E is a sizeof expression, returns its argument type. 9217 static QualType getSizeOfArgType(const Expr *E) { 9218 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9219 return SizeOf->getTypeOfArgument(); 9220 return QualType(); 9221 } 9222 9223 namespace { 9224 9225 struct SearchNonTrivialToInitializeField 9226 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9227 using Super = 9228 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9229 9230 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9231 9232 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9233 SourceLocation SL) { 9234 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9235 asDerived().visitArray(PDIK, AT, SL); 9236 return; 9237 } 9238 9239 Super::visitWithKind(PDIK, FT, SL); 9240 } 9241 9242 void visitARCStrong(QualType FT, SourceLocation SL) { 9243 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9244 } 9245 void visitARCWeak(QualType FT, SourceLocation SL) { 9246 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9247 } 9248 void visitStruct(QualType FT, SourceLocation SL) { 9249 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9250 visit(FD->getType(), FD->getLocation()); 9251 } 9252 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9253 const ArrayType *AT, SourceLocation SL) { 9254 visit(getContext().getBaseElementType(AT), SL); 9255 } 9256 void visitTrivial(QualType FT, SourceLocation SL) {} 9257 9258 static void diag(QualType RT, const Expr *E, Sema &S) { 9259 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9260 } 9261 9262 ASTContext &getContext() { return S.getASTContext(); } 9263 9264 const Expr *E; 9265 Sema &S; 9266 }; 9267 9268 struct SearchNonTrivialToCopyField 9269 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9270 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9271 9272 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9273 9274 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9275 SourceLocation SL) { 9276 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9277 asDerived().visitArray(PCK, AT, SL); 9278 return; 9279 } 9280 9281 Super::visitWithKind(PCK, FT, SL); 9282 } 9283 9284 void visitARCStrong(QualType FT, SourceLocation SL) { 9285 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9286 } 9287 void visitARCWeak(QualType FT, SourceLocation SL) { 9288 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9289 } 9290 void visitStruct(QualType FT, SourceLocation SL) { 9291 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9292 visit(FD->getType(), FD->getLocation()); 9293 } 9294 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9295 SourceLocation SL) { 9296 visit(getContext().getBaseElementType(AT), SL); 9297 } 9298 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9299 SourceLocation SL) {} 9300 void visitTrivial(QualType FT, SourceLocation SL) {} 9301 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9302 9303 static void diag(QualType RT, const Expr *E, Sema &S) { 9304 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9305 } 9306 9307 ASTContext &getContext() { return S.getASTContext(); } 9308 9309 const Expr *E; 9310 Sema &S; 9311 }; 9312 9313 } 9314 9315 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9316 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9317 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9318 9319 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9320 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9321 return false; 9322 9323 return doesExprLikelyComputeSize(BO->getLHS()) || 9324 doesExprLikelyComputeSize(BO->getRHS()); 9325 } 9326 9327 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9328 } 9329 9330 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9331 /// 9332 /// \code 9333 /// #define MACRO 0 9334 /// foo(MACRO); 9335 /// foo(0); 9336 /// \endcode 9337 /// 9338 /// This should return true for the first call to foo, but not for the second 9339 /// (regardless of whether foo is a macro or function). 9340 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9341 SourceLocation CallLoc, 9342 SourceLocation ArgLoc) { 9343 if (!CallLoc.isMacroID()) 9344 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9345 9346 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9347 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9348 } 9349 9350 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9351 /// last two arguments transposed. 9352 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9353 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9354 return; 9355 9356 const Expr *SizeArg = 9357 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9358 9359 auto isLiteralZero = [](const Expr *E) { 9360 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9361 }; 9362 9363 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9364 SourceLocation CallLoc = Call->getRParenLoc(); 9365 SourceManager &SM = S.getSourceManager(); 9366 if (isLiteralZero(SizeArg) && 9367 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9368 9369 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9370 9371 // Some platforms #define bzero to __builtin_memset. See if this is the 9372 // case, and if so, emit a better diagnostic. 
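    // Illustrative sketch (editorial addition): such a platform definition
    // might look along the lines of
    //   #define bzero(s, n) __builtin_memset(s, 0, n)
    // so a user-written call to bzero reaches this code as a memset whose
    // immediate macro name is "bzero", and the bzero-specific wording is
    // preferred below.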
9373 if (BId == Builtin::BIbzero || 9374 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9375 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9376 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9377 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9378 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9379 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9380 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9381 } 9382 return; 9383 } 9384 9385 // If the second argument to a memset is a sizeof expression and the third 9386 // isn't, this is also likely an error. This should catch 9387 // 'memset(buf, sizeof(buf), 0xff)'. 9388 if (BId == Builtin::BImemset && 9389 doesExprLikelyComputeSize(Call->getArg(1)) && 9390 !doesExprLikelyComputeSize(Call->getArg(2))) { 9391 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9392 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9393 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9394 return; 9395 } 9396 } 9397 9398 /// Check for dangerous or invalid arguments to memset(). 9399 /// 9400 /// This issues warnings on known problematic, dangerous or unspecified 9401 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9402 /// function calls. 9403 /// 9404 /// \param Call The call expression to diagnose. 9405 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9406 unsigned BId, 9407 IdentifierInfo *FnName) { 9408 assert(BId != 0); 9409 9410 // It is possible to have a non-standard definition of memset. Validate 9411 // we have enough arguments, and if not, abort further checking. 9412 unsigned ExpectedNumArgs = 9413 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9414 if (Call->getNumArgs() < ExpectedNumArgs) 9415 return; 9416 9417 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9418 BId == Builtin::BIstrndup ? 1 : 2); 9419 unsigned LenArg = 9420 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9421 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9422 9423 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9424 Call->getBeginLoc(), Call->getRParenLoc())) 9425 return; 9426 9427 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9428 CheckMemaccessSize(*this, BId, Call); 9429 9430 // We have special checking when the length is a sizeof expression. 9431 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9432 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9433 llvm::FoldingSetNodeID SizeOfArgID; 9434 9435 // Although widely used, 'bzero' is not a standard function. Be more strict 9436 // with the argument types before allowing diagnostics and only allow the 9437 // form bzero(ptr, sizeof(...)). 9438 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9439 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9440 return; 9441 9442 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9443 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9444 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9445 9446 QualType DestTy = Dest->getType(); 9447 QualType PointeeTy; 9448 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9449 PointeeTy = DestPtrTy->getPointeeType(); 9450 9451 // Never warn about void type pointers. This can be used to suppress 9452 // false positives. 
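      // Illustrative example (editorial addition; 'obj' is hypothetical):
      //   memset(&obj, 0, sizeof(obj));          // may warn for non-trivial 'obj'
      //   memset((void *)&obj, 0, sizeof(obj));  // explicit void* cast: no warning
      // This mirrors the note_bad_memaccess_silence fix-it emitted at the end
      // of this loop.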
9453 if (PointeeTy->isVoidType()) 9454 continue; 9455 9456 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9457 // actually comparing the expressions for equality. Because computing the 9458 // expression IDs can be expensive, we only do this if the diagnostic is 9459 // enabled. 9460 if (SizeOfArg && 9461 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9462 SizeOfArg->getExprLoc())) { 9463 // We only compute IDs for expressions if the warning is enabled, and 9464 // cache the sizeof arg's ID. 9465 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9466 SizeOfArg->Profile(SizeOfArgID, Context, true); 9467 llvm::FoldingSetNodeID DestID; 9468 Dest->Profile(DestID, Context, true); 9469 if (DestID == SizeOfArgID) { 9470 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9471 // over sizeof(src) as well. 9472 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9473 StringRef ReadableName = FnName->getName(); 9474 9475 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9476 if (UnaryOp->getOpcode() == UO_AddrOf) 9477 ActionIdx = 1; // If its an address-of operator, just remove it. 9478 if (!PointeeTy->isIncompleteType() && 9479 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9480 ActionIdx = 2; // If the pointee's size is sizeof(char), 9481 // suggest an explicit length. 9482 9483 // If the function is defined as a builtin macro, do not show macro 9484 // expansion. 9485 SourceLocation SL = SizeOfArg->getExprLoc(); 9486 SourceRange DSR = Dest->getSourceRange(); 9487 SourceRange SSR = SizeOfArg->getSourceRange(); 9488 SourceManager &SM = getSourceManager(); 9489 9490 if (SM.isMacroArgExpansion(SL)) { 9491 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9492 SL = SM.getSpellingLoc(SL); 9493 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9494 SM.getSpellingLoc(DSR.getEnd())); 9495 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9496 SM.getSpellingLoc(SSR.getEnd())); 9497 } 9498 9499 DiagRuntimeBehavior(SL, SizeOfArg, 9500 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9501 << ReadableName 9502 << PointeeTy 9503 << DestTy 9504 << DSR 9505 << SSR); 9506 DiagRuntimeBehavior(SL, SizeOfArg, 9507 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9508 << ActionIdx 9509 << SSR); 9510 9511 break; 9512 } 9513 } 9514 9515 // Also check for cases where the sizeof argument is the exact same 9516 // type as the memory argument, and where it points to a user-defined 9517 // record type. 9518 if (SizeOfArgTy != QualType()) { 9519 if (PointeeTy->isRecordType() && 9520 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9521 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9522 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9523 << FnName << SizeOfArgTy << ArgIdx 9524 << PointeeTy << Dest->getSourceRange() 9525 << LenExpr->getSourceRange()); 9526 break; 9527 } 9528 } 9529 } else if (DestTy->isArrayType()) { 9530 PointeeTy = DestTy; 9531 } 9532 9533 if (PointeeTy == QualType()) 9534 continue; 9535 9536 // Always complain about dynamic classes. 9537 bool IsContained; 9538 if (const CXXRecordDecl *ContainedRD = 9539 getContainedDynamicClass(PointeeTy, IsContained)) { 9540 9541 unsigned OperationType = 0; 9542 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9543 // "overwritten" if we're warning about the destination for any call 9544 // but memcmp; otherwise a verb appropriate to the call. 
9545 if (ArgIdx != 0 || IsCmp) { 9546 if (BId == Builtin::BImemcpy) 9547 OperationType = 1; 9548 else if(BId == Builtin::BImemmove) 9549 OperationType = 2; 9550 else if (IsCmp) 9551 OperationType = 3; 9552 } 9553 9554 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9555 PDiag(diag::warn_dyn_class_memaccess) 9556 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9557 << IsContained << ContainedRD << OperationType 9558 << Call->getCallee()->getSourceRange()); 9559 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9560 BId != Builtin::BImemset) 9561 DiagRuntimeBehavior( 9562 Dest->getExprLoc(), Dest, 9563 PDiag(diag::warn_arc_object_memaccess) 9564 << ArgIdx << FnName << PointeeTy 9565 << Call->getCallee()->getSourceRange()); 9566 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9567 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9568 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9569 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9570 PDiag(diag::warn_cstruct_memaccess) 9571 << ArgIdx << FnName << PointeeTy << 0); 9572 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9573 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9574 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9575 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9576 PDiag(diag::warn_cstruct_memaccess) 9577 << ArgIdx << FnName << PointeeTy << 1); 9578 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9579 } else { 9580 continue; 9581 } 9582 } else 9583 continue; 9584 9585 DiagRuntimeBehavior( 9586 Dest->getExprLoc(), Dest, 9587 PDiag(diag::note_bad_memaccess_silence) 9588 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9589 break; 9590 } 9591 } 9592 9593 // A little helper routine: ignore addition and subtraction of integer literals. 9594 // This intentionally does not ignore all integer constant expressions because 9595 // we don't want to remove sizeof(). 9596 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9597 Ex = Ex->IgnoreParenCasts(); 9598 9599 while (true) { 9600 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9601 if (!BO || !BO->isAdditiveOp()) 9602 break; 9603 9604 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9605 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9606 9607 if (isa<IntegerLiteral>(RHS)) 9608 Ex = LHS; 9609 else if (isa<IntegerLiteral>(LHS)) 9610 Ex = RHS; 9611 else 9612 break; 9613 } 9614 9615 return Ex; 9616 } 9617 9618 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9619 ASTContext &Context) { 9620 // Only handle constant-sized or VLAs, but not flexible members. 9621 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9622 // Only issue the FIXIT for arrays of size > 1. 9623 if (CAT->getSize().getSExtValue() <= 1) 9624 return false; 9625 } else if (!Ty->isVariableArrayType()) { 9626 return false; 9627 } 9628 return true; 9629 } 9630 9631 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9632 // be the size of the source, instead of the destination. 
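// Illustrative example (editorial addition; 'dst'/'src' are hypothetical):
//   char dst[16];
//   strlcpy(dst, src, sizeof(src));   // size taken from the source
//   strlcpy(dst, src, strlen(src));   // likewise suspicious
// When the destination is an array rather than a pointer, the note below
// offers a fix-it rewriting the size argument to sizeof(dst).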
9633 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9634 IdentifierInfo *FnName) { 9635 9636 // Don't crash if the user has the wrong number of arguments 9637 unsigned NumArgs = Call->getNumArgs(); 9638 if ((NumArgs != 3) && (NumArgs != 4)) 9639 return; 9640 9641 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9642 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9643 const Expr *CompareWithSrc = nullptr; 9644 9645 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9646 Call->getBeginLoc(), Call->getRParenLoc())) 9647 return; 9648 9649 // Look for 'strlcpy(dst, x, sizeof(x))' 9650 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9651 CompareWithSrc = Ex; 9652 else { 9653 // Look for 'strlcpy(dst, x, strlen(x))' 9654 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9655 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9656 SizeCall->getNumArgs() == 1) 9657 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9658 } 9659 } 9660 9661 if (!CompareWithSrc) 9662 return; 9663 9664 // Determine if the argument to sizeof/strlen is equal to the source 9665 // argument. In principle there's all kinds of things you could do 9666 // here, for instance creating an == expression and evaluating it with 9667 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9668 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9669 if (!SrcArgDRE) 9670 return; 9671 9672 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9673 if (!CompareWithSrcDRE || 9674 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9675 return; 9676 9677 const Expr *OriginalSizeArg = Call->getArg(2); 9678 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9679 << OriginalSizeArg->getSourceRange() << FnName; 9680 9681 // Output a FIXIT hint if the destination is an array (rather than a 9682 // pointer to an array). This could be enhanced to handle some 9683 // pointers if we know the actual size, like if DstArg is 'array+2' 9684 // we could say 'sizeof(array)-2'. 9685 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9686 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9687 return; 9688 9689 SmallString<128> sizeString; 9690 llvm::raw_svector_ostream OS(sizeString); 9691 OS << "sizeof("; 9692 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9693 OS << ")"; 9694 9695 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9696 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9697 OS.str()); 9698 } 9699 9700 /// Check if two expressions refer to the same declaration. 9701 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9702 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9703 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9704 return D1->getDecl() == D2->getDecl(); 9705 return false; 9706 } 9707 9708 static const Expr *getStrlenExprArg(const Expr *E) { 9709 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9710 const FunctionDecl *FD = CE->getDirectCallee(); 9711 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9712 return nullptr; 9713 return CE->getArg(0)->IgnoreParenCasts(); 9714 } 9715 return nullptr; 9716 } 9717 9718 // Warn on anti-patterns as the 'size' argument to strncat. 
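// Illustrative examples (editorial addition; 'dst'/'src' hypothetical) of the
// anti-patterns diagnosed below:
//   strncat(dst, src, sizeof(dst));                 // ignores what is already in dst
//   strncat(dst, src, sizeof(dst) - strlen(dst));   // off by one: no room for the NUL
//   strncat(dst, src, sizeof(src));                 // sized by the wrong buffer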
// The correct size argument should look like the following:
//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray =
      isConstantSizeArrayWithMoreThanOneElement(DstTy, Context);
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}

void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                         SourceLocation ReturnLoc,
                         bool isObjCMethod,
                         const AttrVec *Attrs,
                         const FunctionDecl *FD) {
  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(Context, lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
        << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto
        = FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
            << FD << getLangOpts().CPlusPlus11;
    }
  }
}

//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//

/// Check for comparisons of floating-point operands using != and ==.
/// Issue a warning if the comparison is not a self-comparison, as such
/// comparisons are not likely to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS) {
  Expr *LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr *RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: comparisons against such literals are often used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral *FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else if (FloatingLiteral *FLR =
                 dyn_cast<FloatingLiteral>(RightExprSansParen))
    if (FLR->isExact())
      return;

  // Check for comparisons against the result of a builtin function call.
  if (CallExpr *CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr *CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
      << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int.
  unsigned Width;

  /// True if the int is known not to have negative values.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
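  /// Illustrative sketch (editorial addition), assuming a 32-bit 'int':
  ///   forValueOfType(C, C.IntTy)         -> { Width = 32, NonNegative = false }
  ///   forValueOfType(C, C.UnsignedIntTy) -> { Width = 32, NonNegative = true }
  /// and a C++ enumeration whose enumerators are 0..3 is treated as
  /// { Width = 2, NonNegative = true }.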
9900 static IntRange forValueOfType(ASTContext &C, QualType T) { 9901 return forValueOfCanonicalType(C, 9902 T->getCanonicalTypeInternal().getTypePtr()); 9903 } 9904 9905 /// Returns the range of an opaque value of a canonical integral type. 9906 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 9907 assert(T->isCanonicalUnqualified()); 9908 9909 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9910 T = VT->getElementType().getTypePtr(); 9911 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9912 T = CT->getElementType().getTypePtr(); 9913 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9914 T = AT->getValueType().getTypePtr(); 9915 9916 if (!C.getLangOpts().CPlusPlus) { 9917 // For enum types in C code, use the underlying datatype. 9918 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9919 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 9920 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 9921 // For enum types in C++, use the known bit width of the enumerators. 9922 EnumDecl *Enum = ET->getDecl(); 9923 // In C++11, enums can have a fixed underlying type. Use this type to 9924 // compute the range. 9925 if (Enum->isFixed()) { 9926 return IntRange(C.getIntWidth(QualType(T, 0)), 9927 !ET->isSignedIntegerOrEnumerationType()); 9928 } 9929 9930 unsigned NumPositive = Enum->getNumPositiveBits(); 9931 unsigned NumNegative = Enum->getNumNegativeBits(); 9932 9933 if (NumNegative == 0) 9934 return IntRange(NumPositive, true/*NonNegative*/); 9935 else 9936 return IntRange(std::max(NumPositive + 1, NumNegative), 9937 false/*NonNegative*/); 9938 } 9939 9940 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 9941 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 9942 9943 const BuiltinType *BT = cast<BuiltinType>(T); 9944 assert(BT->isInteger()); 9945 9946 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9947 } 9948 9949 /// Returns the "target" range of a canonical integral type, i.e. 9950 /// the range of values expressible in the type. 9951 /// 9952 /// This matches forValueOfCanonicalType except that enums have the 9953 /// full range of their type, not the range of their enumerators. 9954 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 9955 assert(T->isCanonicalUnqualified()); 9956 9957 if (const VectorType *VT = dyn_cast<VectorType>(T)) 9958 T = VT->getElementType().getTypePtr(); 9959 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 9960 T = CT->getElementType().getTypePtr(); 9961 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 9962 T = AT->getValueType().getTypePtr(); 9963 if (const EnumType *ET = dyn_cast<EnumType>(T)) 9964 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 9965 9966 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 9967 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 9968 9969 const BuiltinType *BT = cast<BuiltinType>(T); 9970 assert(BT->isInteger()); 9971 9972 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 9973 } 9974 9975 /// Returns the supremum of two ranges: i.e. their conservative merge. 9976 static IntRange join(IntRange L, IntRange R) { 9977 return IntRange(std::max(L.Width, R.Width), 9978 L.NonNegative && R.NonNegative); 9979 } 9980 9981 /// Returns the infinum of two ranges: i.e. their aggressive merge. 
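  /// Illustrative sketch (editorial addition):
  ///   join({8, true}, {16, false}) == {16, false}  // conservative: widest, may be negative
  ///   meet({8, true}, {16, false}) == {8, true}    // aggressive: narrowest, non-negative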
9982 static IntRange meet(IntRange L, IntRange R) { 9983 return IntRange(std::min(L.Width, R.Width), 9984 L.NonNegative || R.NonNegative); 9985 } 9986 }; 9987 9988 } // namespace 9989 9990 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 9991 unsigned MaxWidth) { 9992 if (value.isSigned() && value.isNegative()) 9993 return IntRange(value.getMinSignedBits(), false); 9994 9995 if (value.getBitWidth() > MaxWidth) 9996 value = value.trunc(MaxWidth); 9997 9998 // isNonNegative() just checks the sign bit without considering 9999 // signedness. 10000 return IntRange(value.getActiveBits(), true); 10001 } 10002 10003 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 10004 unsigned MaxWidth) { 10005 if (result.isInt()) 10006 return GetValueRange(C, result.getInt(), MaxWidth); 10007 10008 if (result.isVector()) { 10009 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 10010 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 10011 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 10012 R = IntRange::join(R, El); 10013 } 10014 return R; 10015 } 10016 10017 if (result.isComplexInt()) { 10018 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 10019 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 10020 return IntRange::join(R, I); 10021 } 10022 10023 // This can happen with lossless casts to intptr_t of "based" lvalues. 10024 // Assume it might use arbitrary bits. 10025 // FIXME: The only reason we need to pass the type in here is to get 10026 // the sign right on this one case. It would be nice if APValue 10027 // preserved this. 10028 assert(result.isLValue() || result.isAddrLabelDiff()); 10029 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 10030 } 10031 10032 static QualType GetExprType(const Expr *E) { 10033 QualType Ty = E->getType(); 10034 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 10035 Ty = AtomicRHS->getValueType(); 10036 return Ty; 10037 } 10038 10039 /// Pseudo-evaluate the given integer expression, estimating the 10040 /// range of values it might take. 10041 /// 10042 /// \param MaxWidth - the width to which the value will be truncated 10043 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 10044 bool InConstantContext) { 10045 E = E->IgnoreParens(); 10046 10047 // Try a full evaluation first. 10048 Expr::EvalResult result; 10049 if (E->EvaluateAsRValue(result, C, InConstantContext)) 10050 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 10051 10052 // I think we only want to look through implicit casts here; if the 10053 // user has an explicit widening cast, we should treat the value as 10054 // being of the new, wider type. 10055 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 10056 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 10057 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext); 10058 10059 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 10060 10061 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 10062 CE->getCastKind() == CK_BooleanToSignedIntegral; 10063 10064 // Assume that non-integer casts can span the full range of the type. 
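    // Illustrative example (editorial addition; 'someFloat' is hypothetical):
    //   int i = someFloat;   // implicit CK_FloatingToIntegral conversion
    // is not an integral cast, so we conservatively fall back to the full
    // range of the result type below.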
10065 if (!isIntegerCast) 10066 return OutputTypeRange; 10067 10068 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 10069 std::min(MaxWidth, OutputTypeRange.Width), 10070 InConstantContext); 10071 10072 // Bail out if the subexpr's range is as wide as the cast type. 10073 if (SubRange.Width >= OutputTypeRange.Width) 10074 return OutputTypeRange; 10075 10076 // Otherwise, we take the smaller width, and we're non-negative if 10077 // either the output type or the subexpr is. 10078 return IntRange(SubRange.Width, 10079 SubRange.NonNegative || OutputTypeRange.NonNegative); 10080 } 10081 10082 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 10083 // If we can fold the condition, just take that operand. 10084 bool CondResult; 10085 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 10086 return GetExprRange(C, 10087 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 10088 MaxWidth, InConstantContext); 10089 10090 // Otherwise, conservatively merge. 10091 IntRange L = 10092 GetExprRange(C, CO->getTrueExpr(), MaxWidth, InConstantContext); 10093 IntRange R = 10094 GetExprRange(C, CO->getFalseExpr(), MaxWidth, InConstantContext); 10095 return IntRange::join(L, R); 10096 } 10097 10098 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 10099 switch (BO->getOpcode()) { 10100 case BO_Cmp: 10101 llvm_unreachable("builtin <=> should have class type"); 10102 10103 // Boolean-valued operations are single-bit and positive. 10104 case BO_LAnd: 10105 case BO_LOr: 10106 case BO_LT: 10107 case BO_GT: 10108 case BO_LE: 10109 case BO_GE: 10110 case BO_EQ: 10111 case BO_NE: 10112 return IntRange::forBoolType(); 10113 10114 // The type of the assignments is the type of the LHS, so the RHS 10115 // is not necessarily the same type. 10116 case BO_MulAssign: 10117 case BO_DivAssign: 10118 case BO_RemAssign: 10119 case BO_AddAssign: 10120 case BO_SubAssign: 10121 case BO_XorAssign: 10122 case BO_OrAssign: 10123 // TODO: bitfields? 10124 return IntRange::forValueOfType(C, GetExprType(E)); 10125 10126 // Simple assignments just pass through the RHS, which will have 10127 // been coerced to the LHS type. 10128 case BO_Assign: 10129 // TODO: bitfields? 10130 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10131 10132 // Operations with opaque sources are black-listed. 10133 case BO_PtrMemD: 10134 case BO_PtrMemI: 10135 return IntRange::forValueOfType(C, GetExprType(E)); 10136 10137 // Bitwise-and uses the *infinum* of the two source ranges. 10138 case BO_And: 10139 case BO_AndAssign: 10140 return IntRange::meet( 10141 GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext), 10142 GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext)); 10143 10144 // Left shift gets black-listed based on a judgement call. 10145 case BO_Shl: 10146 // ...except that we want to treat '1 << (blah)' as logically 10147 // positive. It's an important idiom. 10148 if (IntegerLiteral *I 10149 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10150 if (I->getValue() == 1) { 10151 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10152 return IntRange(R.Width, /*NonNegative*/ true); 10153 } 10154 } 10155 LLVM_FALLTHROUGH; 10156 10157 case BO_ShlAssign: 10158 return IntRange::forValueOfType(C, GetExprType(E)); 10159 10160 // Right shift by a constant can narrow its left argument. 
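    // Illustrative example (editorial addition; 'x' is a hypothetical
    // 32-bit unsigned value):
    //   x >> 24   // at most 8 active bits remain, and the result stays
    //             // non-negative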
10161 case BO_Shr: 10162 case BO_ShrAssign: { 10163 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10164 10165 // If the shift amount is a positive constant, drop the width by 10166 // that much. 10167 llvm::APSInt shift; 10168 if (BO->getRHS()->isIntegerConstantExpr(shift, C) && 10169 shift.isNonNegative()) { 10170 unsigned zext = shift.getZExtValue(); 10171 if (zext >= L.Width) 10172 L.Width = (L.NonNegative ? 0 : 1); 10173 else 10174 L.Width -= zext; 10175 } 10176 10177 return L; 10178 } 10179 10180 // Comma acts as its right operand. 10181 case BO_Comma: 10182 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10183 10184 // Black-list pointer subtractions. 10185 case BO_Sub: 10186 if (BO->getLHS()->getType()->isPointerType()) 10187 return IntRange::forValueOfType(C, GetExprType(E)); 10188 break; 10189 10190 // The width of a division result is mostly determined by the size 10191 // of the LHS. 10192 case BO_Div: { 10193 // Don't 'pre-truncate' the operands. 10194 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10195 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10196 10197 // If the divisor is constant, use that. 10198 llvm::APSInt divisor; 10199 if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { 10200 unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) 10201 if (log2 >= L.Width) 10202 L.Width = (L.NonNegative ? 0 : 1); 10203 else 10204 L.Width = std::min(L.Width - log2, MaxWidth); 10205 return L; 10206 } 10207 10208 // Otherwise, just use the LHS's width. 10209 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10210 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10211 } 10212 10213 // The result of a remainder can't be larger than the result of 10214 // either side. 10215 case BO_Rem: { 10216 // Don't 'pre-truncate' the operands. 10217 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10218 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext); 10219 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext); 10220 10221 IntRange meet = IntRange::meet(L, R); 10222 meet.Width = std::min(meet.Width, MaxWidth); 10223 return meet; 10224 } 10225 10226 // The default behavior is okay for these. 10227 case BO_Mul: 10228 case BO_Add: 10229 case BO_Xor: 10230 case BO_Or: 10231 break; 10232 } 10233 10234 // The default case is to treat the operation as if it were closed 10235 // on the narrowest type that encompasses both operands. 10236 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext); 10237 IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext); 10238 return IntRange::join(L, R); 10239 } 10240 10241 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10242 switch (UO->getOpcode()) { 10243 // Boolean-valued operations are white-listed. 10244 case UO_LNot: 10245 return IntRange::forBoolType(); 10246 10247 // Operations with opaque sources are black-listed. 
10248 case UO_Deref: 10249 case UO_AddrOf: // should be impossible 10250 return IntRange::forValueOfType(C, GetExprType(E)); 10251 10252 default: 10253 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext); 10254 } 10255 } 10256 10257 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10258 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext); 10259 10260 if (const auto *BitField = E->getSourceBitField()) 10261 return IntRange(BitField->getBitWidthValue(C), 10262 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10263 10264 return IntRange::forValueOfType(C, GetExprType(E)); 10265 } 10266 10267 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10268 bool InConstantContext) { 10269 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext); 10270 } 10271 10272 /// Checks whether the given value, which currently has the given 10273 /// source semantics, has the same value when coerced through the 10274 /// target semantics. 10275 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10276 const llvm::fltSemantics &Src, 10277 const llvm::fltSemantics &Tgt) { 10278 llvm::APFloat truncated = value; 10279 10280 bool ignored; 10281 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10282 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10283 10284 return truncated.bitwiseIsEqual(value); 10285 } 10286 10287 /// Checks whether the given value, which currently has the given 10288 /// source semantics, has the same value when coerced through the 10289 /// target semantics. 10290 /// 10291 /// The value might be a vector of floats (or a complex number). 10292 static bool IsSameFloatAfterCast(const APValue &value, 10293 const llvm::fltSemantics &Src, 10294 const llvm::fltSemantics &Tgt) { 10295 if (value.isFloat()) 10296 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10297 10298 if (value.isVector()) { 10299 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10300 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10301 return false; 10302 return true; 10303 } 10304 10305 assert(value.isComplexFloat()); 10306 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10307 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10308 } 10309 10310 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 10311 bool IsListInit = false); 10312 10313 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10314 // Suppress cases where we are comparing against an enum constant. 10315 if (const DeclRefExpr *DR = 10316 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10317 if (isa<EnumConstantDecl>(DR->getDecl())) 10318 return true; 10319 10320 // Suppress cases where the value is expanded from a macro, unless that macro 10321 // is how a language represents a boolean literal. This is the case in both C 10322 // and Objective-C. 
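  // Illustrative example (editorial addition): the classic case is
  //   if (some_long_value <= INT_MAX) ...
  // which is tautological when sizeof(int) == sizeof(long) but should stay
  // quiet because INT_MAX is spelled via a macro; 'true'/'false' and
  // 'YES'/'NO' are deliberately not given the same exemption.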
10323 SourceLocation BeginLoc = E->getBeginLoc(); 10324 if (BeginLoc.isMacroID()) { 10325 StringRef MacroName = Lexer::getImmediateMacroName( 10326 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10327 return MacroName != "YES" && MacroName != "NO" && 10328 MacroName != "true" && MacroName != "false"; 10329 } 10330 10331 return false; 10332 } 10333 10334 static bool isKnownToHaveUnsignedValue(Expr *E) { 10335 return E->getType()->isIntegerType() && 10336 (!E->getType()->isSignedIntegerType() || 10337 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10338 } 10339 10340 namespace { 10341 /// The promoted range of values of a type. In general this has the 10342 /// following structure: 10343 /// 10344 /// |-----------| . . . |-----------| 10345 /// ^ ^ ^ ^ 10346 /// Min HoleMin HoleMax Max 10347 /// 10348 /// ... where there is only a hole if a signed type is promoted to unsigned 10349 /// (in which case Min and Max are the smallest and largest representable 10350 /// values). 10351 struct PromotedRange { 10352 // Min, or HoleMax if there is a hole. 10353 llvm::APSInt PromotedMin; 10354 // Max, or HoleMin if there is a hole. 10355 llvm::APSInt PromotedMax; 10356 10357 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10358 if (R.Width == 0) 10359 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10360 else if (R.Width >= BitWidth && !Unsigned) { 10361 // Promotion made the type *narrower*. This happens when promoting 10362 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10363 // Treat all values of 'signed int' as being in range for now. 10364 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10365 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10366 } else { 10367 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10368 .extOrTrunc(BitWidth); 10369 PromotedMin.setIsUnsigned(Unsigned); 10370 10371 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10372 .extOrTrunc(BitWidth); 10373 PromotedMax.setIsUnsigned(Unsigned); 10374 } 10375 } 10376 10377 // Determine whether this range is contiguous (has no hole). 10378 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10379 10380 // Where a constant value is within the range. 10381 enum ComparisonResult { 10382 LT = 0x1, 10383 LE = 0x2, 10384 GT = 0x4, 10385 GE = 0x8, 10386 EQ = 0x10, 10387 NE = 0x20, 10388 InRangeFlag = 0x40, 10389 10390 Less = LE | LT | NE, 10391 Min = LE | InRangeFlag, 10392 InRange = InRangeFlag, 10393 Max = GE | InRangeFlag, 10394 Greater = GE | GT | NE, 10395 10396 OnlyValue = LE | GE | EQ | InRangeFlag, 10397 InHole = NE 10398 }; 10399 10400 ComparisonResult compare(const llvm::APSInt &Value) const { 10401 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10402 Value.isUnsigned() == PromotedMin.isUnsigned()); 10403 if (!isContiguous()) { 10404 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10405 if (Value.isMinValue()) return Min; 10406 if (Value.isMaxValue()) return Max; 10407 if (Value >= PromotedMin) return InRange; 10408 if (Value <= PromotedMax) return InRange; 10409 return InHole; 10410 } 10411 10412 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10413 case -1: return Less; 10414 case 0: return PromotedMin == PromotedMax ? 
OnlyValue : Min; 10415 case 1: 10416 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10417 case -1: return InRange; 10418 case 0: return Max; 10419 case 1: return Greater; 10420 } 10421 } 10422 10423 llvm_unreachable("impossible compare result"); 10424 } 10425 10426 static llvm::Optional<StringRef> 10427 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10428 if (Op == BO_Cmp) { 10429 ComparisonResult LTFlag = LT, GTFlag = GT; 10430 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10431 10432 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10433 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10434 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10435 return llvm::None; 10436 } 10437 10438 ComparisonResult TrueFlag, FalseFlag; 10439 if (Op == BO_EQ) { 10440 TrueFlag = EQ; 10441 FalseFlag = NE; 10442 } else if (Op == BO_NE) { 10443 TrueFlag = NE; 10444 FalseFlag = EQ; 10445 } else { 10446 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10447 TrueFlag = LT; 10448 FalseFlag = GE; 10449 } else { 10450 TrueFlag = GT; 10451 FalseFlag = LE; 10452 } 10453 if (Op == BO_GE || Op == BO_LE) 10454 std::swap(TrueFlag, FalseFlag); 10455 } 10456 if (R & TrueFlag) 10457 return StringRef("true"); 10458 if (R & FalseFlag) 10459 return StringRef("false"); 10460 return llvm::None; 10461 } 10462 }; 10463 } 10464 10465 static bool HasEnumType(Expr *E) { 10466 // Strip off implicit integral promotions. 10467 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10468 if (ICE->getCastKind() != CK_IntegralCast && 10469 ICE->getCastKind() != CK_NoOp) 10470 break; 10471 E = ICE->getSubExpr(); 10472 } 10473 10474 return E->getType()->isEnumeralType(); 10475 } 10476 10477 static int classifyConstantValue(Expr *Constant) { 10478 // The values of this enumeration are used in the diagnostics 10479 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 10480 enum ConstantValueKind { 10481 Miscellaneous = 0, 10482 LiteralTrue, 10483 LiteralFalse 10484 }; 10485 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 10486 return BL->getValue() ? ConstantValueKind::LiteralTrue 10487 : ConstantValueKind::LiteralFalse; 10488 return ConstantValueKind::Miscellaneous; 10489 } 10490 10491 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 10492 Expr *Constant, Expr *Other, 10493 const llvm::APSInt &Value, 10494 bool RhsConstant) { 10495 if (S.inTemplateInstantiation()) 10496 return false; 10497 10498 Expr *OriginalOther = Other; 10499 10500 Constant = Constant->IgnoreParenImpCasts(); 10501 Other = Other->IgnoreParenImpCasts(); 10502 10503 // Suppress warnings on tautological comparisons between values of the same 10504 // enumeration type. There are only two ways we could warn on this: 10505 // - If the constant is outside the range of representable values of 10506 // the enumeration. In such a case, we should warn about the cast 10507 // to enumeration type, not about the comparison. 10508 // - If the constant is the maximum / minimum in-range value. For an 10509 // enumeratin type, such comparisons can be meaningful and useful. 10510 if (Constant->getType()->isEnumeralType() && 10511 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 10512 return false; 10513 10514 // TODO: Investigate using GetExprRange() to get tighter bounds 10515 // on the bit ranges. 
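  // Illustrative examples (editorial addition; 'c' and 'u' hypothetical):
  //   unsigned char c = ...;
  //   if (c == 256) ...   // always false: 256 is out of range for 'unsigned char'
  //   unsigned u = ...;
  //   if (u >= 0) ...     // always true for an unsigned operand
  // are the kinds of comparisons classified below.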
  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (namely, macOS).
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherRange = IntRange::forBoolType();

  // Determine the promoted range of the other type and see if a comparison of
  // the constant against that range is tautological.
  PromotedRange OtherPromotedRange(OtherRange, Value.getBitWidth(),
                                   Value.isUnsigned());
  auto Cmp = OtherPromotedRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits).
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
                 Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {
    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
                        ? (HasEnumType(OriginalOther)
                               ?
diag::warn_unsigned_enum_always_true_comparison 10594 : diag::warn_unsigned_always_true_comparison) 10595 : diag::warn_tautological_constant_compare; 10596 10597 S.Diag(E->getOperatorLoc(), Diag) 10598 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 10599 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10600 } 10601 10602 return true; 10603 } 10604 10605 /// Analyze the operands of the given comparison. Implements the 10606 /// fallback case from AnalyzeComparison. 10607 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 10608 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10609 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10610 } 10611 10612 /// Implements -Wsign-compare. 10613 /// 10614 /// \param E the binary operator to check for warnings 10615 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 10616 // The type the comparison is being performed in. 10617 QualType T = E->getLHS()->getType(); 10618 10619 // Only analyze comparison operators where both sides have been converted to 10620 // the same type. 10621 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 10622 return AnalyzeImpConvsInComparison(S, E); 10623 10624 // Don't analyze value-dependent comparisons directly. 10625 if (E->isValueDependent()) 10626 return AnalyzeImpConvsInComparison(S, E); 10627 10628 Expr *LHS = E->getLHS(); 10629 Expr *RHS = E->getRHS(); 10630 10631 if (T->isIntegralType(S.Context)) { 10632 llvm::APSInt RHSValue; 10633 llvm::APSInt LHSValue; 10634 10635 bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); 10636 bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); 10637 10638 // We don't care about expressions whose result is a constant. 10639 if (IsRHSIntegralLiteral && IsLHSIntegralLiteral) 10640 return AnalyzeImpConvsInComparison(S, E); 10641 10642 // We only care about expressions where just one side is literal 10643 if (IsRHSIntegralLiteral ^ IsLHSIntegralLiteral) { 10644 // Is the constant on the RHS or LHS? 10645 const bool RhsConstant = IsRHSIntegralLiteral; 10646 Expr *Const = RhsConstant ? RHS : LHS; 10647 Expr *Other = RhsConstant ? LHS : RHS; 10648 const llvm::APSInt &Value = RhsConstant ? RHSValue : LHSValue; 10649 10650 // Check whether an integer constant comparison results in a value 10651 // of 'true' or 'false'. 10652 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 10653 return AnalyzeImpConvsInComparison(S, E); 10654 } 10655 } 10656 10657 if (!T->hasUnsignedIntegerRepresentation()) { 10658 // We don't do anything special if this isn't an unsigned integral 10659 // comparison: we're only interested in integral comparisons, and 10660 // signed comparisons only happen in cases we don't care to warn about. 10661 return AnalyzeImpConvsInComparison(S, E); 10662 } 10663 10664 LHS = LHS->IgnoreParenImpCasts(); 10665 RHS = RHS->IgnoreParenImpCasts(); 10666 10667 if (!S.getLangOpts().CPlusPlus) { 10668 // Avoid warning about comparison of integers with different signs when 10669 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 10670 // the type of `E`. 
10671 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
10672 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10673 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
10674 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
10675 }
10676
10677 // Check to see if one of the (unmodified) operands is of different
10678 // signedness.
10679 Expr *signedOperand, *unsignedOperand;
10680 if (LHS->getType()->hasSignedIntegerRepresentation()) {
10681 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
10682 "unsigned comparison between two signed integer expressions?");
10683 signedOperand = LHS;
10684 unsignedOperand = RHS;
10685 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
10686 signedOperand = RHS;
10687 unsignedOperand = LHS;
10688 } else {
10689 return AnalyzeImpConvsInComparison(S, E);
10690 }
10691
10692 // Otherwise, calculate the effective range of the signed operand.
10693 IntRange signedRange =
10694 GetExprRange(S.Context, signedOperand, S.isConstantEvaluated());
10695
10696 // Go ahead and analyze implicit conversions in the operands. Note
10697 // that we skip the implicit conversions on both sides.
10698 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
10699 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
10700
10701 // If the signed range is non-negative, -Wsign-compare won't fire.
10702 if (signedRange.NonNegative)
10703 return;
10704
10705 // For (in)equality comparisons, if the unsigned operand is a
10706 // constant which cannot collide with an overflowed signed operand,
10707 // then reinterpreting the signed operand as unsigned will not
10708 // change the result of the comparison.
10709 if (E->isEqualityOp()) {
10710 unsigned comparisonWidth = S.Context.getIntWidth(T);
10711 IntRange unsignedRange =
10712 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated());
10713
10714 // We should never be unable to prove that the unsigned operand is
10715 // non-negative.
10716 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
10717
10718 if (unsignedRange.Width < comparisonWidth)
10719 return;
10720 }
10721
10722 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
10723 S.PDiag(diag::warn_mixed_sign_comparison)
10724 << LHS->getType() << RHS->getType()
10725 << LHS->getSourceRange() << RHS->getSourceRange());
10726 }
10727
10728 /// Analyzes an attempt to assign the given value to a bitfield.
10729 ///
10730 /// Returns true if there was something fishy about the attempt.
10731 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
10732 SourceLocation InitLoc) {
10733 assert(Bitfield->isBitField());
10734 if (Bitfield->isInvalidDecl())
10735 return false;
10736
10737 // White-list bool bitfields.
10738 QualType BitfieldType = Bitfield->getType();
10739 if (BitfieldType->isBooleanType())
10740 return false;
10741
10742 if (BitfieldType->isEnumeralType()) {
10743 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
10744 // If the underlying enum type was not explicitly specified as an unsigned
10745 // type and the enum contains only positive values, MSVC++ will cause an
10746 // inconsistency by storing this as a signed type.
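    // Illustrative example of the MSVC inconsistency described above (names
    // made up):
    //
    //   enum E { A, B, C };        // no explicit underlying type
    //   struct S { E Field : 2; };
    //   s.Field = C;               // MSVC treats the bit-field as signed, so
    //                              // the stored value 2 reads back as -2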
10747 if (S.getLangOpts().CPlusPlus11 && 10748 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 10749 BitfieldEnumDecl->getNumPositiveBits() > 0 && 10750 BitfieldEnumDecl->getNumNegativeBits() == 0) { 10751 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 10752 << BitfieldEnumDecl->getNameAsString(); 10753 } 10754 } 10755 10756 if (Bitfield->getType()->isBooleanType()) 10757 return false; 10758 10759 // Ignore value- or type-dependent expressions. 10760 if (Bitfield->getBitWidth()->isValueDependent() || 10761 Bitfield->getBitWidth()->isTypeDependent() || 10762 Init->isValueDependent() || 10763 Init->isTypeDependent()) 10764 return false; 10765 10766 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 10767 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 10768 10769 Expr::EvalResult Result; 10770 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 10771 Expr::SE_AllowSideEffects)) { 10772 // The RHS is not constant. If the RHS has an enum type, make sure the 10773 // bitfield is wide enough to hold all the values of the enum without 10774 // truncation. 10775 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 10776 EnumDecl *ED = EnumTy->getDecl(); 10777 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 10778 10779 // Enum types are implicitly signed on Windows, so check if there are any 10780 // negative enumerators to see if the enum was intended to be signed or 10781 // not. 10782 bool SignedEnum = ED->getNumNegativeBits() > 0; 10783 10784 // Check for surprising sign changes when assigning enum values to a 10785 // bitfield of different signedness. If the bitfield is signed and we 10786 // have exactly the right number of bits to store this unsigned enum, 10787 // suggest changing the enum to an unsigned type. This typically happens 10788 // on Windows where unfixed enums always use an underlying type of 'int'. 10789 unsigned DiagID = 0; 10790 if (SignedEnum && !SignedBitfield) { 10791 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 10792 } else if (SignedBitfield && !SignedEnum && 10793 ED->getNumPositiveBits() == FieldWidth) { 10794 DiagID = diag::warn_signed_bitfield_enum_conversion; 10795 } 10796 10797 if (DiagID) { 10798 S.Diag(InitLoc, DiagID) << Bitfield << ED; 10799 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 10800 SourceRange TypeRange = 10801 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 10802 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 10803 << SignedEnum << TypeRange; 10804 } 10805 10806 // Compute the required bitwidth. If the enum has negative values, we need 10807 // one more bit than the normal number of positive bits to represent the 10808 // sign bit. 10809 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 10810 ED->getNumNegativeBits()) 10811 : ED->getNumPositiveBits(); 10812 10813 // Check the bitwidth. 
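      // For example (illustrative, non-constant right-hand side):
      //
      //   enum Color { Red, Green, Blue, Alpha };  // needs 2 bits
      //   struct S { Color C : 1; } s;
      //   s.C = getColor();   // bit-field is too narrow to hold every
      //                       // enumerator of 'Color'; diagnosed below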
10814 if (BitsNeeded > FieldWidth) { 10815 Expr *WidthExpr = Bitfield->getBitWidth(); 10816 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 10817 << Bitfield << ED; 10818 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 10819 << BitsNeeded << ED << WidthExpr->getSourceRange(); 10820 } 10821 } 10822 10823 return false; 10824 } 10825 10826 llvm::APSInt Value = Result.Val.getInt(); 10827 10828 unsigned OriginalWidth = Value.getBitWidth(); 10829 10830 if (!Value.isSigned() || Value.isNegative()) 10831 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 10832 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 10833 OriginalWidth = Value.getMinSignedBits(); 10834 10835 if (OriginalWidth <= FieldWidth) 10836 return false; 10837 10838 // Compute the value which the bitfield will contain. 10839 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 10840 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 10841 10842 // Check whether the stored value is equal to the original value. 10843 TruncatedValue = TruncatedValue.extend(OriginalWidth); 10844 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 10845 return false; 10846 10847 // Special-case bitfields of width 1: booleans are naturally 0/1, and 10848 // therefore don't strictly fit into a signed bitfield of width 1. 10849 if (FieldWidth == 1 && Value == 1) 10850 return false; 10851 10852 std::string PrettyValue = Value.toString(10); 10853 std::string PrettyTrunc = TruncatedValue.toString(10); 10854 10855 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 10856 << PrettyValue << PrettyTrunc << OriginalInit->getType() 10857 << Init->getSourceRange(); 10858 10859 return true; 10860 } 10861 10862 /// Analyze the given simple or compound assignment for warning-worthy 10863 /// operations. 10864 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 10865 // Just recurse on the LHS. 10866 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 10867 10868 // We want to recurse on the RHS as normal unless we're assigning to 10869 // a bitfield. 10870 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 10871 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 10872 E->getOperatorLoc())) { 10873 // Recurse, ignoring any implicit conversions on the RHS. 10874 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 10875 E->getOperatorLoc()); 10876 } 10877 } 10878 10879 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 10880 10881 // Diagnose implicitly sequentially-consistent atomic assignment. 10882 if (E->getLHS()->getType()->isAtomicType()) 10883 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 10884 } 10885 10886 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 10887 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 10888 SourceLocation CContext, unsigned diag, 10889 bool pruneControlFlow = false) { 10890 if (pruneControlFlow) { 10891 S.DiagRuntimeBehavior(E->getExprLoc(), E, 10892 S.PDiag(diag) 10893 << SourceType << T << E->getSourceRange() 10894 << SourceRange(CContext)); 10895 return; 10896 } 10897 S.Diag(E->getExprLoc(), diag) 10898 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 10899 } 10900 10901 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
10902 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 10903 SourceLocation CContext, 10904 unsigned diag, bool pruneControlFlow = false) { 10905 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 10906 } 10907 10908 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 10909 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 10910 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 10911 } 10912 10913 static void adornObjCBoolConversionDiagWithTernaryFixit( 10914 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 10915 Expr *Ignored = SourceExpr->IgnoreImplicit(); 10916 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 10917 Ignored = OVE->getSourceExpr(); 10918 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 10919 isa<BinaryOperator>(Ignored) || 10920 isa<CXXOperatorCallExpr>(Ignored); 10921 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 10922 if (NeedsParens) 10923 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 10924 << FixItHint::CreateInsertion(EndLoc, ")"); 10925 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 10926 } 10927 10928 /// Diagnose an implicit cast from a floating point value to an integer value. 10929 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 10930 SourceLocation CContext) { 10931 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 10932 const bool PruneWarnings = S.inTemplateInstantiation(); 10933 10934 Expr *InnerE = E->IgnoreParenImpCasts(); 10935 // We also want to warn on, e.g., "int i = -1.234" 10936 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 10937 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 10938 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 10939 10940 const bool IsLiteral = 10941 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 10942 10943 llvm::APFloat Value(0.0); 10944 bool IsConstant = 10945 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 10946 if (!IsConstant) { 10947 if (isObjCSignedCharBool(S, T)) { 10948 return adornObjCBoolConversionDiagWithTernaryFixit( 10949 S, E, 10950 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 10951 << E->getType()); 10952 } 10953 10954 return DiagnoseImpCast(S, E, T, CContext, 10955 diag::warn_impcast_float_integer, PruneWarnings); 10956 } 10957 10958 bool isExact = false; 10959 10960 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 10961 T->hasUnsignedIntegerRepresentation()); 10962 llvm::APFloat::opStatus Result = Value.convertToInteger( 10963 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 10964 10965 // FIXME: Force the precision of the source value down so we don't print 10966 // digits which are usually useless (we don't really care here if we 10967 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 10968 // would automatically print the shortest representation, but it's a bit 10969 // tricky to implement. 
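  // (59/196 below is a rational approximation of log10(2) ~= 0.30103, so the
  // rounded-up product is roughly the number of decimal digits carried by
  // 'precision' significand bits: 24 bits -> 8 digits, 53 bits -> 16 digits.)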
10970 SmallString<16> PrettySourceValue; 10971 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 10972 precision = (precision * 59 + 195) / 196; 10973 Value.toString(PrettySourceValue, precision); 10974 10975 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 10976 return adornObjCBoolConversionDiagWithTernaryFixit( 10977 S, E, 10978 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 10979 << PrettySourceValue); 10980 } 10981 10982 if (Result == llvm::APFloat::opOK && isExact) { 10983 if (IsLiteral) return; 10984 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 10985 PruneWarnings); 10986 } 10987 10988 // Conversion of a floating-point value to a non-bool integer where the 10989 // integral part cannot be represented by the integer type is undefined. 10990 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 10991 return DiagnoseImpCast( 10992 S, E, T, CContext, 10993 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 10994 : diag::warn_impcast_float_to_integer_out_of_range, 10995 PruneWarnings); 10996 10997 unsigned DiagID = 0; 10998 if (IsLiteral) { 10999 // Warn on floating point literal to integer. 11000 DiagID = diag::warn_impcast_literal_float_to_integer; 11001 } else if (IntegerValue == 0) { 11002 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11003 return DiagnoseImpCast(S, E, T, CContext, 11004 diag::warn_impcast_float_integer, PruneWarnings); 11005 } 11006 // Warn on non-zero to zero conversion. 11007 DiagID = diag::warn_impcast_float_to_integer_zero; 11008 } else { 11009 if (IntegerValue.isUnsigned()) { 11010 if (!IntegerValue.isMaxValue()) { 11011 return DiagnoseImpCast(S, E, T, CContext, 11012 diag::warn_impcast_float_integer, PruneWarnings); 11013 } 11014 } else { // IntegerValue.isSigned() 11015 if (!IntegerValue.isMaxSignedValue() && 11016 !IntegerValue.isMinSignedValue()) { 11017 return DiagnoseImpCast(S, E, T, CContext, 11018 diag::warn_impcast_float_integer, PruneWarnings); 11019 } 11020 } 11021 // Warn on evaluatable floating point expression to integer conversion. 11022 DiagID = diag::warn_impcast_float_to_integer; 11023 } 11024 11025 SmallString<16> PrettyTargetValue; 11026 if (IsBool) 11027 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11028 else 11029 IntegerValue.toString(PrettyTargetValue); 11030 11031 if (PruneWarnings) { 11032 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11033 S.PDiag(DiagID) 11034 << E->getType() << T.getUnqualifiedType() 11035 << PrettySourceValue << PrettyTargetValue 11036 << E->getSourceRange() << SourceRange(CContext)); 11037 } else { 11038 S.Diag(E->getExprLoc(), DiagID) 11039 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11040 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11041 } 11042 } 11043 11044 /// Analyze the given compound assignment for the possible losing of 11045 /// floating-point precision. 
11046 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11047 assert(isa<CompoundAssignOperator>(E) && 11048 "Must be compound assignment operation"); 11049 // Recurse on the LHS and RHS in here 11050 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11051 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11052 11053 if (E->getLHS()->getType()->isAtomicType()) 11054 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11055 11056 // Now check the outermost expression 11057 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11058 const auto *RBT = cast<CompoundAssignOperator>(E) 11059 ->getComputationResultType() 11060 ->getAs<BuiltinType>(); 11061 11062 // The below checks assume source is floating point. 11063 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11064 11065 // If source is floating point but target is an integer. 11066 if (ResultBT->isInteger()) 11067 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11068 E->getExprLoc(), diag::warn_impcast_float_integer); 11069 11070 if (!ResultBT->isFloatingPoint()) 11071 return; 11072 11073 // If both source and target are floating points, warn about losing precision. 11074 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11075 QualType(ResultBT, 0), QualType(RBT, 0)); 11076 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11077 // warn about dropping FP rank. 11078 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11079 diag::warn_impcast_float_result_precision); 11080 } 11081 11082 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11083 IntRange Range) { 11084 if (!Range.Width) return "0"; 11085 11086 llvm::APSInt ValueInRange = Value; 11087 ValueInRange.setIsSigned(!Range.NonNegative); 11088 ValueInRange = ValueInRange.trunc(Range.Width); 11089 return ValueInRange.toString(10); 11090 } 11091 11092 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11093 if (!isa<ImplicitCastExpr>(Ex)) 11094 return false; 11095 11096 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11097 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11098 const Type *Source = 11099 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11100 if (Target->isDependentType()) 11101 return false; 11102 11103 const BuiltinType *FloatCandidateBT = 11104 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11105 const Type *BoolCandidateType = ToBool ? Target : Source; 11106 11107 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11108 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11109 } 11110 11111 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 11112 SourceLocation CC) { 11113 unsigned NumArgs = TheCall->getNumArgs(); 11114 for (unsigned i = 0; i < NumArgs; ++i) { 11115 Expr *CurrA = TheCall->getArg(i); 11116 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 11117 continue; 11118 11119 bool IsSwapped = ((i > 0) && 11120 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 11121 IsSwapped |= ((i < (NumArgs - 1)) && 11122 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 11123 if (IsSwapped) { 11124 // Warn on this floating-point to bool conversion. 
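      // For example (illustrative; the callee and arguments are made up):
      //
      //   void update(bool Enable, double Factor);
      //   update(1.5, IsReady);   // arguments are likely swapped: 1.5 is
      //                           // truncated to 'true' and IsReady is
      //                           // promoted to 1.0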
11125 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 11126 CurrA->getType(), CC, 11127 diag::warn_impcast_floating_point_to_bool); 11128 } 11129 } 11130 } 11131 11132 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 11133 SourceLocation CC) { 11134 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 11135 E->getExprLoc())) 11136 return; 11137 11138 // Don't warn on functions which have return type nullptr_t. 11139 if (isa<CallExpr>(E)) 11140 return; 11141 11142 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 11143 const Expr::NullPointerConstantKind NullKind = 11144 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 11145 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 11146 return; 11147 11148 // Return if target type is a safe conversion. 11149 if (T->isAnyPointerType() || T->isBlockPointerType() || 11150 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11151 return; 11152 11153 SourceLocation Loc = E->getSourceRange().getBegin(); 11154 11155 // Venture through the macro stacks to get to the source of macro arguments. 11156 // The new location is a better location than the complete location that was 11157 // passed in. 11158 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11159 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11160 11161 // __null is usually wrapped in a macro. Go up a macro if that is the case. 11162 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11163 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11164 Loc, S.SourceMgr, S.getLangOpts()); 11165 if (MacroName == "NULL") 11166 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11167 } 11168 11169 // Only warn if the null and context location are in the same macro expansion. 11170 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11171 return; 11172 11173 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11174 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11175 << FixItHint::CreateReplacement(Loc, 11176 S.getFixItZeroLiteralForType(T, Loc)); 11177 } 11178 11179 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11180 ObjCArrayLiteral *ArrayLiteral); 11181 11182 static void 11183 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11184 ObjCDictionaryLiteral *DictionaryLiteral); 11185 11186 /// Check a single element within a collection literal against the 11187 /// target element type. 11188 static void checkObjCCollectionLiteralElement(Sema &S, 11189 QualType TargetElementType, 11190 Expr *Element, 11191 unsigned ElementKind) { 11192 // Skip a bitcast to 'id' or qualified 'id'. 
11193 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11194 if (ICE->getCastKind() == CK_BitCast && 11195 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11196 Element = ICE->getSubExpr(); 11197 } 11198 11199 QualType ElementType = Element->getType(); 11200 ExprResult ElementResult(Element); 11201 if (ElementType->getAs<ObjCObjectPointerType>() && 11202 S.CheckSingleAssignmentConstraints(TargetElementType, 11203 ElementResult, 11204 false, false) 11205 != Sema::Compatible) { 11206 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11207 << ElementType << ElementKind << TargetElementType 11208 << Element->getSourceRange(); 11209 } 11210 11211 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11212 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11213 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11214 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11215 } 11216 11217 /// Check an Objective-C array literal being converted to the given 11218 /// target type. 11219 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11220 ObjCArrayLiteral *ArrayLiteral) { 11221 if (!S.NSArrayDecl) 11222 return; 11223 11224 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11225 if (!TargetObjCPtr) 11226 return; 11227 11228 if (TargetObjCPtr->isUnspecialized() || 11229 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11230 != S.NSArrayDecl->getCanonicalDecl()) 11231 return; 11232 11233 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11234 if (TypeArgs.size() != 1) 11235 return; 11236 11237 QualType TargetElementType = TypeArgs[0]; 11238 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11239 checkObjCCollectionLiteralElement(S, TargetElementType, 11240 ArrayLiteral->getElement(I), 11241 0); 11242 } 11243 } 11244 11245 /// Check an Objective-C dictionary literal being converted to the given 11246 /// target type. 11247 static void 11248 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11249 ObjCDictionaryLiteral *DictionaryLiteral) { 11250 if (!S.NSDictionaryDecl) 11251 return; 11252 11253 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11254 if (!TargetObjCPtr) 11255 return; 11256 11257 if (TargetObjCPtr->isUnspecialized() || 11258 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11259 != S.NSDictionaryDecl->getCanonicalDecl()) 11260 return; 11261 11262 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11263 if (TypeArgs.size() != 2) 11264 return; 11265 11266 QualType TargetKeyType = TypeArgs[0]; 11267 QualType TargetObjectType = TypeArgs[1]; 11268 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 11269 auto Element = DictionaryLiteral->getKeyValueElement(I); 11270 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 11271 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 11272 } 11273 } 11274 11275 // Helper function to filter out cases for constant width constant conversion. 11276 // Don't warn on char array initialization or for non-decimal values. 11277 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 11278 SourceLocation CC) { 11279 // If initializing from a constant, and the constant starts with '0', 11280 // then it is a binary, octal, or hexadecimal. Allow these constants 11281 // to fill all the bits, even if there is a sign change. 
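  // For example (illustrative):
  //
  //   signed char C1 = 255;    // constant conversion warning: stored value is -1
  //   signed char C2 = 0xFF;   // suppressed here: a hex constant is assumed to
  //                            // fill the bits deliberately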
11282 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11283 const char FirstLiteralCharacter =
11284 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11285 if (FirstLiteralCharacter == '0')
11286 return false;
11287 }
11288
11289 // If the CC location points to a '{', and the type is char, then assume
11290 // it is an array initialization.
11291 if (CC.isValid() && T->isCharType()) {
11292 const char FirstContextCharacter =
11293 S.getSourceManager().getCharacterData(CC)[0];
11294 if (FirstContextCharacter == '{')
11295 return false;
11296 }
11297
11298 return true;
11299 }
11300
11301 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
11302 const auto *IL = dyn_cast<IntegerLiteral>(E);
11303 if (!IL) {
11304 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
11305 if (UO->getOpcode() == UO_Minus)
11306 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
11307 }
11308 }
11309
11310 return IL;
11311 }
11312
11313 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
11314 E = E->IgnoreParenImpCasts();
11315 SourceLocation ExprLoc = E->getExprLoc();
11316
11317 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
11318 BinaryOperator::Opcode Opc = BO->getOpcode();
11319 Expr::EvalResult Result;
11320 // Do not diagnose unsigned shifts.
11321 if (Opc == BO_Shl) {
11322 const auto *LHS = getIntegerLiteral(BO->getLHS());
11323 const auto *RHS = getIntegerLiteral(BO->getRHS());
11324 if (LHS && LHS->getValue() == 0)
11325 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
11326 else if (!E->isValueDependent() && LHS && RHS &&
11327 RHS->getValue().isNonNegative() &&
11328 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
11329 S.Diag(ExprLoc, diag::warn_left_shift_always)
11330 << (Result.Val.getInt() != 0);
11331 else if (E->getType()->isSignedIntegerType())
11332 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
11333 }
11334 }
11335
11336 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
11337 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
11338 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
11339 if (!LHS || !RHS)
11340 return;
11341 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
11342 (RHS->getValue() == 0 || RHS->getValue() == 1))
11343 // Do not diagnose common idioms.
11344 return;
11345 if (LHS->getValue() != 0 && RHS->getValue() != 0)
11346 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
11347 }
11348 }
11349
11350 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
11351 SourceLocation CC,
11352 bool *ICContext = nullptr,
11353 bool IsListInit = false) {
11354 if (E->isTypeDependent() || E->isValueDependent()) return;
11355
11356 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
11357 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
11358 if (Source == Target) return;
11359 if (Target->isDependentType()) return;
11360
11361 // If the conversion context location is invalid, don't complain. We also
11362 // don't want to emit a warning if the issue occurs from the expansion of
11363 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
11364 // delay this check as long as possible. Once we detect we are in that
11365 // scenario, we just return.
11366 if (CC.isInvalid())
11367 return;
11368
11369 if (Source->isAtomicType())
11370 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
11371
11372 // Diagnose implicit casts to bool.
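  // For example (illustrative; names made up):
  //
  //   bool Failed = "unknown error";   // string literal always converts to true
  //   if (IsReady) {}                  // missing '()' after a function name; the
  //                                    // function pointer is always non-null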
11373 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
11374 if (isa<StringLiteral>(E))
11375 // Warn on string literal to bool. Checks for string literals in logical
11376 // and expressions, for instance, assert(0 && "error here"), are
11377 // prevented by a check in AnalyzeImplicitConversions().
11378 return DiagnoseImpCast(S, E, T, CC,
11379 diag::warn_impcast_string_literal_to_bool);
11380 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
11381 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
11382 // This covers the literal expressions that evaluate to Objective-C
11383 // objects.
11384 return DiagnoseImpCast(S, E, T, CC,
11385 diag::warn_impcast_objective_c_literal_to_bool);
11386 }
11387 if (Source->isPointerType() || Source->canDecayToPointerType()) {
11388 // Warn on pointer to bool conversion that is always true.
11389 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
11390 SourceRange(CC));
11391 }
11392 }
11393
11394 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
11395 // is a typedef for signed char (macOS), then that constant value has to be 1
11396 // or 0.
11397 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) {
11398 Expr::EvalResult Result;
11399 if (E->EvaluateAsInt(Result, S.getASTContext(),
11400 Expr::SE_AllowSideEffects)) {
11401 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
11402 adornObjCBoolConversionDiagWithTernaryFixit(
11403 S, E,
11404 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
11405 << Result.Val.getInt().toString(10));
11406 }
11407 return;
11408 }
11409 }
11410
11411 // Check implicit casts from Objective-C collection literals to specialized
11412 // collection types, e.g., NSArray<NSString *> *.
11413 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E))
11414 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral);
11415 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E))
11416 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
11417
11418 // Strip vector types.
11419 if (isa<VectorType>(Source)) {
11420 if (!isa<VectorType>(Target)) {
11421 if (S.SourceMgr.isInSystemMacro(CC))
11422 return;
11423 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
11424 }
11425
11426 // If the vector cast is a cast between two vectors of the same size, it is
11427 // a bitcast, not a conversion.
11428 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
11429 return;
11430
11431 Source = cast<VectorType>(Source)->getElementType().getTypePtr();
11432 Target = cast<VectorType>(Target)->getElementType().getTypePtr();
11433 }
11434 if (auto VecTy = dyn_cast<VectorType>(Target))
11435 Target = VecTy->getElementType().getTypePtr();
11436
11437 // Strip complex types.
11438 if (isa<ComplexType>(Source)) {
11439 if (!isa<ComplexType>(Target)) {
11440 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType())
11441 return;
11442
11443 return DiagnoseImpCast(S, E, T, CC,
11444 S.getLangOpts().CPlusPlus
11445 ? diag::err_impcast_complex_scalar
11446 : diag::warn_impcast_complex_scalar);
11447 }
11448
11449 Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
11450 Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
11451 }
11452
11453 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
11454 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
11455
11456 // If the source is floating point...
11457 if (SourceBT && SourceBT->isFloatingPoint()) {
11458 // ...and the target is floating point...
11459 if (TargetBT && TargetBT->isFloatingPoint()) {
11460 // ...then warn if we're dropping FP rank.
11461
11462 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
11463 QualType(SourceBT, 0), QualType(TargetBT, 0));
11464 if (Order > 0) {
11465 // Don't warn about float constants that are precisely
11466 // representable in the target type.
11467 Expr::EvalResult result;
11468 if (E->EvaluateAsRValue(result, S.Context)) {
11469 // Value might be a float, a float vector, or a float complex.
11470 if (IsSameFloatAfterCast(result.Val,
11471 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
11472 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
11473 return;
11474 }
11475
11476 if (S.SourceMgr.isInSystemMacro(CC))
11477 return;
11478
11479 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
11480 }
11481 // ... or possibly if we're increasing rank, too
11482 else if (Order < 0) {
11483 if (S.SourceMgr.isInSystemMacro(CC))
11484 return;
11485
11486 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
11487 }
11488 return;
11489 }
11490
11491 // If the target is integral, always warn.
11492 if (TargetBT && TargetBT->isInteger()) {
11493 if (S.SourceMgr.isInSystemMacro(CC))
11494 return;
11495
11496 DiagnoseFloatingImpCast(S, E, T, CC);
11497 }
11498
11499 // Detect the case where a call result is converted from floating-point to
11500 // bool, and the final argument to the call is converted from bool, to
11501 // discover this typo:
11502 //
11503 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
11504 //
11505 // FIXME: This is an incredibly special case; is there some more general
11506 // way to detect this class of misplaced-parentheses bug?
11507 if (Target->isBooleanType() && isa<CallExpr>(E)) {
11508 // Check last argument of function call to see if it is an
11509 // implicit cast from a type matching the type the result
11510 // is being cast to.
11511 CallExpr *CEx = cast<CallExpr>(E);
11512 if (unsigned NumArgs = CEx->getNumArgs()) {
11513 Expr *LastA = CEx->getArg(NumArgs - 1);
11514 Expr *InnerE = LastA->IgnoreParenImpCasts();
11515 if (isa<ImplicitCastExpr>(LastA) &&
11516 InnerE->getType()->isBooleanType()) {
11517 // Warn on this floating-point to bool conversion
11518 DiagnoseImpCast(S, E, T, CC,
11519 diag::warn_impcast_floating_point_to_bool);
11520 }
11521 }
11522 }
11523 return;
11524 }
11525
11526 // Valid casts involving fixed point types should be accounted for here.
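  // For example (illustrative; assumes a typical target where 'short _Accum'
  // has 8 integral bits, i.e. a maximum just below 256):
  //
  //   short _Accum A = 300;   // 300 cannot be represented; diagnosed below as
  //                           // an out-of-range fixed-point conversion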
11527 if (Source->isFixedPointType()) { 11528 if (Target->isUnsaturatedFixedPointType()) { 11529 Expr::EvalResult Result; 11530 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11531 S.isConstantEvaluated())) { 11532 APFixedPoint Value = Result.Val.getFixedPoint(); 11533 APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11534 APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11535 if (Value > MaxVal || Value < MinVal) { 11536 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11537 S.PDiag(diag::warn_impcast_fixed_point_range) 11538 << Value.toString() << T 11539 << E->getSourceRange() 11540 << clang::SourceRange(CC)); 11541 return; 11542 } 11543 } 11544 } else if (Target->isIntegerType()) { 11545 Expr::EvalResult Result; 11546 if (!S.isConstantEvaluated() && 11547 E->EvaluateAsFixedPoint(Result, S.Context, 11548 Expr::SE_AllowSideEffects)) { 11549 APFixedPoint FXResult = Result.Val.getFixedPoint(); 11550 11551 bool Overflowed; 11552 llvm::APSInt IntResult = FXResult.convertToInt( 11553 S.Context.getIntWidth(T), 11554 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11555 11556 if (Overflowed) { 11557 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11558 S.PDiag(diag::warn_impcast_fixed_point_range) 11559 << FXResult.toString() << T 11560 << E->getSourceRange() 11561 << clang::SourceRange(CC)); 11562 return; 11563 } 11564 } 11565 } 11566 } else if (Target->isUnsaturatedFixedPointType()) { 11567 if (Source->isIntegerType()) { 11568 Expr::EvalResult Result; 11569 if (!S.isConstantEvaluated() && 11570 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11571 llvm::APSInt Value = Result.Val.getInt(); 11572 11573 bool Overflowed; 11574 APFixedPoint IntResult = APFixedPoint::getFromIntValue( 11575 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11576 11577 if (Overflowed) { 11578 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11579 S.PDiag(diag::warn_impcast_fixed_point_range) 11580 << Value.toString(/*Radix=*/10) << T 11581 << E->getSourceRange() 11582 << clang::SourceRange(CC)); 11583 return; 11584 } 11585 } 11586 } 11587 } 11588 11589 // If we are casting an integer type to a floating point type without 11590 // initialization-list syntax, we might lose accuracy if the floating 11591 // point type has a narrower significand than the integer type. 11592 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 11593 TargetBT->isFloatingType() && !IsListInit) { 11594 // Determine the number of precision bits in the source integer type. 11595 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11596 unsigned int SourcePrecision = SourceRange.Width; 11597 11598 // Determine the number of precision bits in the 11599 // target floating point type. 11600 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 11601 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 11602 11603 if (SourcePrecision > 0 && TargetPrecision > 0 && 11604 SourcePrecision > TargetPrecision) { 11605 11606 llvm::APSInt SourceInt; 11607 if (E->isIntegerConstantExpr(SourceInt, S.Context)) { 11608 // If the source integer is a constant, convert it to the target 11609 // floating point type. Issue a warning if the value changes 11610 // during the whole conversion. 
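        // For example (illustrative):
        //
        //   float F = 16777217;   // 2^24 + 1 has no exact 'float'
        //                         // representation; the stored value is
        //                         // 16777216.0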
11611 llvm::APFloat TargetFloatValue( 11612 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 11613 llvm::APFloat::opStatus ConversionStatus = 11614 TargetFloatValue.convertFromAPInt( 11615 SourceInt, SourceBT->isSignedInteger(), 11616 llvm::APFloat::rmNearestTiesToEven); 11617 11618 if (ConversionStatus != llvm::APFloat::opOK) { 11619 std::string PrettySourceValue = SourceInt.toString(10); 11620 SmallString<32> PrettyTargetValue; 11621 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 11622 11623 S.DiagRuntimeBehavior( 11624 E->getExprLoc(), E, 11625 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 11626 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11627 << E->getSourceRange() << clang::SourceRange(CC)); 11628 } 11629 } else { 11630 // Otherwise, the implicit conversion may lose precision. 11631 DiagnoseImpCast(S, E, T, CC, 11632 diag::warn_impcast_integer_float_precision); 11633 } 11634 } 11635 } 11636 11637 DiagnoseNullConversion(S, E, T, CC); 11638 11639 S.DiscardMisalignedMemberAddress(Target, E); 11640 11641 if (Target->isBooleanType()) 11642 DiagnoseIntInBoolContext(S, E); 11643 11644 if (!Source->isIntegerType() || !Target->isIntegerType()) 11645 return; 11646 11647 // TODO: remove this early return once the false positives for constant->bool 11648 // in templates, macros, etc, are reduced or removed. 11649 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 11650 return; 11651 11652 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 11653 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 11654 return adornObjCBoolConversionDiagWithTernaryFixit( 11655 S, E, 11656 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 11657 << E->getType()); 11658 } 11659 11660 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated()); 11661 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 11662 11663 if (SourceRange.Width > TargetRange.Width) { 11664 // If the source is a constant, use a default-on diagnostic. 11665 // TODO: this should happen for bitfield stores, too. 11666 Expr::EvalResult Result; 11667 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 11668 S.isConstantEvaluated())) { 11669 llvm::APSInt Value(32); 11670 Value = Result.Val.getInt(); 11671 11672 if (S.SourceMgr.isInSystemMacro(CC)) 11673 return; 11674 11675 std::string PrettySourceValue = Value.toString(10); 11676 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 11677 11678 S.DiagRuntimeBehavior( 11679 E->getExprLoc(), E, 11680 S.PDiag(diag::warn_impcast_integer_precision_constant) 11681 << PrettySourceValue << PrettyTargetValue << E->getType() << T 11682 << E->getSourceRange() << clang::SourceRange(CC)); 11683 return; 11684 } 11685 11686 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
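    // For example (illustrative, on an LP64 target):
    //
    //   long L = get();
    //   int I = L;      // -Wshorten-64-to-32
    //   short S2 = I;   // plain -Wconversion precision warning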
11687 if (S.SourceMgr.isInSystemMacro(CC))
11688 return;
11689
11690 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
11691 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
11692 /* pruneControlFlow */ true);
11693 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
11694 }
11695
11696 if (TargetRange.Width > SourceRange.Width) {
11697 if (auto *UO = dyn_cast<UnaryOperator>(E))
11698 if (UO->getOpcode() == UO_Minus)
11699 if (Source->isUnsignedIntegerType()) {
11700 if (Target->isUnsignedIntegerType())
11701 return DiagnoseImpCast(S, E, T, CC,
11702 diag::warn_impcast_high_order_zero_bits);
11703 if (Target->isSignedIntegerType())
11704 return DiagnoseImpCast(S, E, T, CC,
11705 diag::warn_impcast_nonnegative_result);
11706 }
11707 }
11708
11709 if (TargetRange.Width == SourceRange.Width && !TargetRange.NonNegative &&
11710 SourceRange.NonNegative && Source->isSignedIntegerType()) {
11711 // When doing a signed-to-signed conversion, warn if the positive source
11712 // value needs exactly as many bits as the target is wide; its top bit then
11713 // lands in the target's sign bit and the stored value becomes negative.
11714
11715 Expr::EvalResult Result;
11716 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) &&
11717 !S.SourceMgr.isInSystemMacro(CC)) {
11718 llvm::APSInt Value = Result.Val.getInt();
11719 if (isSameWidthConstantConversion(S, E, T, CC)) {
11720 std::string PrettySourceValue = Value.toString(10);
11721 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
11722
11723 S.DiagRuntimeBehavior(
11724 E->getExprLoc(), E,
11725 S.PDiag(diag::warn_impcast_integer_precision_constant)
11726 << PrettySourceValue << PrettyTargetValue << E->getType() << T
11727 << E->getSourceRange() << clang::SourceRange(CC));
11728 return;
11729 }
11730 }
11731
11732 // Fall through for non-constants to give a sign conversion warning.
11733 }
11734
11735 if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
11736 (!TargetRange.NonNegative && SourceRange.NonNegative &&
11737 SourceRange.Width == TargetRange.Width)) {
11738 if (S.SourceMgr.isInSystemMacro(CC))
11739 return;
11740
11741 unsigned DiagID = diag::warn_impcast_integer_sign;
11742
11743 // Traditionally, gcc has warned about this under -Wsign-compare.
11744 // We also want to warn about it in -Wconversion.
11745 // So if -Wconversion is off, use a completely identical diagnostic
11746 // in the sign-compare group.
11747 // The conditional-checking code will watch for both of these, separately.
11748 if (ICContext) {
11749 DiagID = diag::warn_impcast_integer_sign_conditional;
11750 *ICContext = true;
11751 }
11752
11753 return DiagnoseImpCast(S, E, T, CC, DiagID);
11754 }
11755
11756 // Diagnose conversions between different enumeration types.
11757 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
11758 // type, to give us better diagnostics.
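  // For example (illustrative, C):
  //
  //   enum Fruit { Apple };
  //   enum Animal { Cat };
  //   enum Fruit F = Cat;   // implicit conversion from 'enum Animal' to
  //                         // 'enum Fruit' (-Wenum-conversion)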
11759 QualType SourceType = E->getType(); 11760 if (!S.getLangOpts().CPlusPlus) { 11761 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 11762 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 11763 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 11764 SourceType = S.Context.getTypeDeclType(Enum); 11765 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 11766 } 11767 } 11768 11769 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 11770 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 11771 if (SourceEnum->getDecl()->hasNameForLinkage() && 11772 TargetEnum->getDecl()->hasNameForLinkage() && 11773 SourceEnum != TargetEnum) { 11774 if (S.SourceMgr.isInSystemMacro(CC)) 11775 return; 11776 11777 return DiagnoseImpCast(S, E, SourceType, T, CC, 11778 diag::warn_impcast_different_enum_types); 11779 } 11780 } 11781 11782 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11783 SourceLocation CC, QualType T); 11784 11785 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 11786 SourceLocation CC, bool &ICContext) { 11787 E = E->IgnoreParenImpCasts(); 11788 11789 if (isa<ConditionalOperator>(E)) 11790 return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T); 11791 11792 AnalyzeImplicitConversions(S, E, CC); 11793 if (E->getType() != T) 11794 return CheckImplicitConversion(S, E, T, CC, &ICContext); 11795 } 11796 11797 static void CheckConditionalOperator(Sema &S, ConditionalOperator *E, 11798 SourceLocation CC, QualType T) { 11799 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 11800 11801 bool Suspicious = false; 11802 CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious); 11803 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 11804 11805 if (T->isBooleanType()) 11806 DiagnoseIntInBoolContext(S, E); 11807 11808 // If -Wconversion would have warned about either of the candidates 11809 // for a signedness conversion to the context type... 11810 if (!Suspicious) return; 11811 11812 // ...but it's currently ignored... 11813 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 11814 return; 11815 11816 // ...then check whether it would have warned about either of the 11817 // candidates for a signedness conversion to the condition type. 11818 if (E->getType() == T) return; 11819 11820 Suspicious = false; 11821 CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(), 11822 E->getType(), CC, &Suspicious); 11823 if (!Suspicious) 11824 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 11825 E->getType(), CC, &Suspicious); 11826 } 11827 11828 /// Check conversion of given expression to boolean. 11829 /// Input argument E is a logical expression. 11830 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 11831 if (S.getLangOpts().Bool) 11832 return; 11833 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 11834 return; 11835 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 11836 } 11837 11838 namespace { 11839 struct AnalyzeImplicitConversionsWorkItem { 11840 Expr *E; 11841 SourceLocation CC; 11842 bool IsListInit; 11843 }; 11844 } 11845 11846 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 11847 /// that should be visited are added to WorkList. 
11848 static void AnalyzeImplicitConversions( 11849 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 11850 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 11851 Expr *OrigE = Item.E; 11852 SourceLocation CC = Item.CC; 11853 11854 QualType T = OrigE->getType(); 11855 Expr *E = OrigE->IgnoreParenImpCasts(); 11856 11857 // Propagate whether we are in a C++ list initialization expression. 11858 // If so, we do not issue warnings for implicit int-float conversion 11859 // precision loss, because C++11 narrowing already handles it. 11860 bool IsListInit = Item.IsListInit || 11861 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 11862 11863 if (E->isTypeDependent() || E->isValueDependent()) 11864 return; 11865 11866 Expr *SourceExpr = E; 11867 // Examine, but don't traverse into the source expression of an 11868 // OpaqueValueExpr, since it may have multiple parents and we don't want to 11869 // emit duplicate diagnostics. Its fine to examine the form or attempt to 11870 // evaluate it in the context of checking the specific conversion to T though. 11871 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11872 if (auto *Src = OVE->getSourceExpr()) 11873 SourceExpr = Src; 11874 11875 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 11876 if (UO->getOpcode() == UO_Not && 11877 UO->getSubExpr()->isKnownToHaveBooleanValue()) 11878 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 11879 << OrigE->getSourceRange() << T->isBooleanType() 11880 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 11881 11882 // For conditional operators, we analyze the arguments as if they 11883 // were being fed directly into the output. 11884 if (auto *CO = dyn_cast<ConditionalOperator>(SourceExpr)) { 11885 CheckConditionalOperator(S, CO, CC, T); 11886 return; 11887 } 11888 11889 // Check implicit argument conversions for function calls. 11890 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 11891 CheckImplicitArgumentConversions(S, Call, CC); 11892 11893 // Go ahead and check any implicit conversions we might have skipped. 11894 // The non-canonical typecheck is just an optimization; 11895 // CheckImplicitConversion will filter out dead implicit conversions. 11896 if (SourceExpr->getType() != T) 11897 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 11898 11899 // Now continue drilling into this expression. 11900 11901 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 11902 // The bound subexpressions in a PseudoObjectExpr are not reachable 11903 // as transitive children. 11904 // FIXME: Use a more uniform representation for this. 11905 for (auto *SE : POE->semantics()) 11906 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 11907 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 11908 } 11909 11910 // Skip past explicit casts. 11911 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 11912 E = CE->getSubExpr()->IgnoreParenImpCasts(); 11913 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 11914 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11915 WorkList.push_back({E, CC, IsListInit}); 11916 return; 11917 } 11918 11919 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 11920 // Do a somewhat different check with comparison operators. 11921 if (BO->isComparisonOp()) 11922 return AnalyzeComparison(S, BO); 11923 11924 // And with simple assignments. 11925 if (BO->getOpcode() == BO_Assign) 11926 return AnalyzeAssignment(S, BO); 11927 // And with compound assignments. 
11928 if (BO->isAssignmentOp()) 11929 return AnalyzeCompoundAssignment(S, BO); 11930 } 11931 11932 // These break the otherwise-useful invariant below. Fortunately, 11933 // we don't really need to recurse into them, because any internal 11934 // expressions should have been analyzed already when they were 11935 // built into statements. 11936 if (isa<StmtExpr>(E)) return; 11937 11938 // Don't descend into unevaluated contexts. 11939 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 11940 11941 // Now just recurse over the expression's children. 11942 CC = E->getExprLoc(); 11943 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 11944 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 11945 for (Stmt *SubStmt : E->children()) { 11946 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 11947 if (!ChildExpr) 11948 continue; 11949 11950 if (IsLogicalAndOperator && 11951 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 11952 // Ignore checking string literals that are in logical and operators. 11953 // This is a common pattern for asserts. 11954 continue; 11955 WorkList.push_back({ChildExpr, CC, IsListInit}); 11956 } 11957 11958 if (BO && BO->isLogicalOp()) { 11959 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 11960 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11961 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11962 11963 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 11964 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 11965 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 11966 } 11967 11968 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 11969 if (U->getOpcode() == UO_LNot) { 11970 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 11971 } else if (U->getOpcode() != UO_AddrOf) { 11972 if (U->getSubExpr()->getType()->isAtomicType()) 11973 S.Diag(U->getSubExpr()->getBeginLoc(), 11974 diag::warn_atomic_implicit_seq_cst); 11975 } 11976 } 11977 } 11978 11979 /// AnalyzeImplicitConversions - Find and report any interesting 11980 /// implicit conversions in the given expression. There are a couple 11981 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 11982 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 11983 bool IsListInit/*= false*/) { 11984 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList; 11985 WorkList.push_back({OrigE, CC, IsListInit}); 11986 while (!WorkList.empty()) 11987 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList); 11988 } 11989 11990 /// Diagnose integer type and any valid implicit conversion to it. 11991 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 11992 // Taking into account implicit conversions, 11993 // allow any integer. 11994 if (!E->getType()->isIntegerType()) { 11995 S.Diag(E->getBeginLoc(), 11996 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 11997 return true; 11998 } 11999 // Potentially emit standard warnings for implicit conversions if enabled 12000 // using -Wconversion. 12001 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 12002 return false; 12003 } 12004 12005 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 12006 // Returns true when emitting a warning about taking the address of a reference. 
12007 static bool CheckForReference(Sema &SemaRef, const Expr *E, 12008 const PartialDiagnostic &PD) { 12009 E = E->IgnoreParenImpCasts(); 12010 12011 const FunctionDecl *FD = nullptr; 12012 12013 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 12014 if (!DRE->getDecl()->getType()->isReferenceType()) 12015 return false; 12016 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12017 if (!M->getMemberDecl()->getType()->isReferenceType()) 12018 return false; 12019 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 12020 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 12021 return false; 12022 FD = Call->getDirectCallee(); 12023 } else { 12024 return false; 12025 } 12026 12027 SemaRef.Diag(E->getExprLoc(), PD); 12028 12029 // If possible, point to location of function. 12030 if (FD) { 12031 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 12032 } 12033 12034 return true; 12035 } 12036 12037 // Returns true if the SourceLocation is expanded from any macro body. 12038 // Returns false if the SourceLocation is invalid, is not in a macro 12039 // expansion, or is expanded from a top-level macro argument. 12040 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 12041 if (Loc.isInvalid()) 12042 return false; 12043 12044 while (Loc.isMacroID()) { 12045 if (SM.isMacroBodyExpansion(Loc)) 12046 return true; 12047 Loc = SM.getImmediateMacroCallerLoc(Loc); 12048 } 12049 12050 return false; 12051 } 12052 12053 /// Diagnose pointers that are always non-null. 12054 /// \param E the expression containing the pointer 12055 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 12056 /// compared to a null pointer 12057 /// \param IsEqual True when the comparison is equal to a null pointer 12058 /// \param Range Extra SourceRange to highlight in the diagnostic 12059 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 12060 Expr::NullPointerConstantKind NullKind, 12061 bool IsEqual, SourceRange Range) { 12062 if (!E) 12063 return; 12064 12065 // Don't warn inside macros. 12066 if (E->getExprLoc().isMacroID()) { 12067 const SourceManager &SM = getSourceManager(); 12068 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 12069 IsInAnyMacroBody(SM, Range.getBegin())) 12070 return; 12071 } 12072 E = E->IgnoreImpCasts(); 12073 12074 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 12075 12076 if (isa<CXXThisExpr>(E)) { 12077 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 12078 : diag::warn_this_bool_conversion; 12079 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 12080 return; 12081 } 12082 12083 bool IsAddressOf = false; 12084 12085 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12086 if (UO->getOpcode() != UO_AddrOf) 12087 return; 12088 IsAddressOf = true; 12089 E = UO->getSubExpr(); 12090 } 12091 12092 if (IsAddressOf) { 12093 unsigned DiagID = IsCompare 12094 ? diag::warn_address_of_reference_null_compare 12095 : diag::warn_address_of_reference_bool_conversion; 12096 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 12097 << IsEqual; 12098 if (CheckForReference(*this, E, PD)) { 12099 return; 12100 } 12101 } 12102 12103 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 12104 bool IsParam = isa<NonNullAttr>(NonnullAttr); 12105 std::string Str; 12106 llvm::raw_string_ostream S(Str); 12107 E->printPretty(S, nullptr, getPrintingPolicy()); 12108 unsigned DiagID = IsCompare ?
diag::warn_nonnull_expr_compare 12109 : diag::warn_cast_nonnull_to_bool; 12110 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 12111 << E->getSourceRange() << Range << IsEqual; 12112 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 12113 }; 12114 12115 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 12116 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 12117 if (auto *Callee = Call->getDirectCallee()) { 12118 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 12119 ComplainAboutNonnullParamOrCall(A); 12120 return; 12121 } 12122 } 12123 } 12124 12125 // Expect to find a single Decl. Skip anything more complicated. 12126 ValueDecl *D = nullptr; 12127 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 12128 D = R->getDecl(); 12129 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12130 D = M->getMemberDecl(); 12131 } 12132 12133 // Weak Decls can be null. 12134 if (!D || D->isWeak()) 12135 return; 12136 12137 // Check for parameter decl with nonnull attribute 12138 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 12139 if (getCurFunction() && 12140 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 12141 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 12142 ComplainAboutNonnullParamOrCall(A); 12143 return; 12144 } 12145 12146 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 12147 // Skip function template not specialized yet. 12148 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 12149 return; 12150 auto ParamIter = llvm::find(FD->parameters(), PV); 12151 assert(ParamIter != FD->param_end()); 12152 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 12153 12154 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 12155 if (!NonNull->args_size()) { 12156 ComplainAboutNonnullParamOrCall(NonNull); 12157 return; 12158 } 12159 12160 for (const ParamIdx &ArgNo : NonNull->args()) { 12161 if (ArgNo.getASTIndex() == ParamNo) { 12162 ComplainAboutNonnullParamOrCall(NonNull); 12163 return; 12164 } 12165 } 12166 } 12167 } 12168 } 12169 } 12170 12171 QualType T = D->getType(); 12172 const bool IsArray = T->isArrayType(); 12173 const bool IsFunction = T->isFunctionType(); 12174 12175 // Address of function is used to silence the function warning. 12176 if (IsAddressOf && IsFunction) { 12177 return; 12178 } 12179 12180 // Found nothing. 12181 if (!IsAddressOf && !IsFunction && !IsArray) 12182 return; 12183 12184 // Pretty print the expression for the diagnostic. 12185 std::string Str; 12186 llvm::raw_string_ostream S(Str); 12187 E->printPretty(S, nullptr, getPrintingPolicy()); 12188 12189 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 12190 : diag::warn_impcast_pointer_to_bool; 12191 enum { 12192 AddressOf, 12193 FunctionPointer, 12194 ArrayPointer 12195 } DiagType; 12196 if (IsAddressOf) 12197 DiagType = AddressOf; 12198 else if (IsFunction) 12199 DiagType = FunctionPointer; 12200 else if (IsArray) 12201 DiagType = ArrayPointer; 12202 else 12203 llvm_unreachable("Could not determine diagnostic."); 12204 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 12205 << Range << IsEqual; 12206 12207 if (!IsFunction) 12208 return; 12209 12210 // Suggest '&' to silence the function warning. 12211 Diag(E->getExprLoc(), diag::note_function_warning_silence) 12212 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 12213 12214 // Check to see if '()' fixit should be emitted. 
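// For illustration, a typical case where the '()' fix-it applies:
//   bool ready();
//   if (ready) { ... }   // the function designator is always non-null
// Below we only suggest 'ready()' when the call's return type makes that a
// plausible intent (bool here, or a pointer/integer for null comparisons).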
12215 QualType ReturnType; 12216 UnresolvedSet<4> NonTemplateOverloads; 12217 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 12218 if (ReturnType.isNull()) 12219 return; 12220 12221 if (IsCompare) { 12222 // There are two cases here. If there is a null pointer constant, only 12223 // suggest the fix-it for a pointer return type. If the null is 0, then 12224 // suggest it if the return type is a pointer or an integer type. 12225 if (!ReturnType->isPointerType()) { 12226 if (NullKind == Expr::NPCK_ZeroExpression || 12227 NullKind == Expr::NPCK_ZeroLiteral) { 12228 if (!ReturnType->isIntegerType()) 12229 return; 12230 } else { 12231 return; 12232 } 12233 } 12234 } else { // !IsCompare 12235 // For function to bool, only suggest if the function pointer has bool 12236 // return type. 12237 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool)) 12238 return; 12239 } 12240 Diag(E->getExprLoc(), diag::note_function_to_function_call) 12241 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()"); 12242 } 12243 12244 /// Diagnoses "dangerous" implicit conversions within the given 12245 /// expression (which is a full expression). Implements -Wconversion 12246 /// and -Wsign-compare. 12247 /// 12248 /// \param CC the "context" location of the implicit conversion, i.e. 12249 /// the location of the syntactic entity requiring the implicit 12250 /// conversion. 12251 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 12252 // Don't diagnose in unevaluated contexts. 12253 if (isUnevaluatedContext()) 12254 return; 12255 12256 // Don't diagnose for value- or type-dependent expressions. 12257 if (E->isTypeDependent() || E->isValueDependent()) 12258 return; 12259 12260 // Check for array bounds violations in cases where the check isn't triggered 12261 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 12262 // ArraySubscriptExpr is on the RHS of a variable initialization. 12263 CheckArrayAccess(E); 12264 12265 // This is not the right CC for (e.g.) a variable initialization. 12266 AnalyzeImplicitConversions(*this, E, CC); 12267 } 12268 12269 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 12270 /// Input argument E is a logical expression. 12271 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 12272 ::CheckBoolLikeConversion(*this, E, CC); 12273 } 12274 12275 /// Diagnose when an expression is an integer constant expression and its 12276 /// evaluation results in integer overflow. 12277 void Sema::CheckForIntOverflow (Expr *E) { 12278 // Use a work list to deal with nested struct initializers. 12279 SmallVector<Expr *, 2> Exprs(1, E); 12280 12281 do { 12282 Expr *OriginalE = Exprs.pop_back_val(); 12283 Expr *E = OriginalE->IgnoreParenCasts(); 12284 12285 if (isa<BinaryOperator>(E)) { 12286 E->EvaluateForOverflow(Context); 12287 continue; 12288 } 12289 12290 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 12291 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 12292 else if (isa<ObjCBoxedExpr>(OriginalE)) 12293 E->EvaluateForOverflow(Context); 12294 else if (auto Call = dyn_cast<CallExpr>(E)) 12295 Exprs.append(Call->arg_begin(), Call->arg_end()); 12296 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 12297 Exprs.append(Message->arg_begin(), Message->arg_end()); 12298 } while (!Exprs.empty()); 12299 } 12300 12301 namespace { 12302 12303 /// Visitor for expressions which looks for unsequenced operations on the 12304 /// same object.
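/// For example, in 'k++ + k' the read of 'k' is unsequenced with the side
/// effect of 'k++'; this visitor diagnoses such cases under -Wunsequenced.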
12305 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 12306 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 12307 12308 /// A tree of sequenced regions within an expression. Two regions are 12309 /// unsequenced if one is an ancestor or a descendent of the other. When we 12310 /// finish processing an expression with sequencing, such as a comma 12311 /// expression, we fold its tree nodes into its parent, since they are 12312 /// unsequenced with respect to nodes we will visit later. 12313 class SequenceTree { 12314 struct Value { 12315 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12316 unsigned Parent : 31; 12317 unsigned Merged : 1; 12318 }; 12319 SmallVector<Value, 8> Values; 12320 12321 public: 12322 /// A region within an expression which may be sequenced with respect 12323 /// to some other region. 12324 class Seq { 12325 friend class SequenceTree; 12326 12327 unsigned Index; 12328 12329 explicit Seq(unsigned N) : Index(N) {} 12330 12331 public: 12332 Seq() : Index(0) {} 12333 }; 12334 12335 SequenceTree() { Values.push_back(Value(0)); } 12336 Seq root() const { return Seq(0); } 12337 12338 /// Create a new sequence of operations, which is an unsequenced 12339 /// subset of \p Parent. This sequence of operations is sequenced with 12340 /// respect to other children of \p Parent. 12341 Seq allocate(Seq Parent) { 12342 Values.push_back(Value(Parent.Index)); 12343 return Seq(Values.size() - 1); 12344 } 12345 12346 /// Merge a sequence of operations into its parent. 12347 void merge(Seq S) { 12348 Values[S.Index].Merged = true; 12349 } 12350 12351 /// Determine whether two operations are unsequenced. This operation 12352 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 12353 /// should have been merged into its parent as appropriate. 12354 bool isUnsequenced(Seq Cur, Seq Old) { 12355 unsigned C = representative(Cur.Index); 12356 unsigned Target = representative(Old.Index); 12357 while (C >= Target) { 12358 if (C == Target) 12359 return true; 12360 C = Values[C].Parent; 12361 } 12362 return false; 12363 } 12364 12365 private: 12366 /// Pick a representative for a sequence. 12367 unsigned representative(unsigned K) { 12368 if (Values[K].Merged) 12369 // Perform path compression as we go. 12370 return Values[K].Parent = representative(Values[K].Parent); 12371 return K; 12372 } 12373 }; 12374 12375 /// An object for which we can track unsequenced uses. 12376 using Object = const NamedDecl *; 12377 12378 /// Different flavors of object usage which we track. We only track the 12379 /// least-sequenced usage of each kind. 12380 enum UsageKind { 12381 /// A read of an object. Multiple unsequenced reads are OK. 12382 UK_Use, 12383 12384 /// A modification of an object which is sequenced before the value 12385 /// computation of the expression, such as ++n in C++. 12386 UK_ModAsValue, 12387 12388 /// A modification of an object which is not sequenced before the value 12389 /// computation of the expression, such as n++. 12390 UK_ModAsSideEffect, 12391 12392 UK_Count = UK_ModAsSideEffect + 1 12393 }; 12394 12395 /// Bundle together a sequencing region and the expression corresponding 12396 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 
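/// For example, while analyzing 'k++ + k', the increment of 'k' is recorded
/// as a UK_ModAsSideEffect usage and the bare read as a UK_Use usage, each
/// remembering the sequencing region in which it was seen.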
12397 struct Usage { 12398 const Expr *UsageExpr; 12399 SequenceTree::Seq Seq; 12400 12401 Usage() : UsageExpr(nullptr), Seq() {} 12402 }; 12403 12404 struct UsageInfo { 12405 Usage Uses[UK_Count]; 12406 12407 /// Have we issued a diagnostic for this object already? 12408 bool Diagnosed; 12409 12410 UsageInfo() : Uses(), Diagnosed(false) {} 12411 }; 12412 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12413 12414 Sema &SemaRef; 12415 12416 /// Sequenced regions within the expression. 12417 SequenceTree Tree; 12418 12419 /// Declaration modifications and references which we have seen. 12420 UsageInfoMap UsageMap; 12421 12422 /// The region we are currently within. 12423 SequenceTree::Seq Region; 12424 12425 /// Filled in with declarations which were modified as a side-effect 12426 /// (that is, post-increment operations). 12427 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12428 12429 /// Expressions to check later. We defer checking these to reduce 12430 /// stack usage. 12431 SmallVectorImpl<const Expr *> &WorkList; 12432 12433 /// RAII object wrapping the visitation of a sequenced subexpression of an 12434 /// expression. At the end of this process, the side-effects of the evaluation 12435 /// become sequenced with respect to the value computation of the result, so 12436 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12437 /// UK_ModAsValue. 12438 struct SequencedSubexpression { 12439 SequencedSubexpression(SequenceChecker &Self) 12440 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12441 Self.ModAsSideEffect = &ModAsSideEffect; 12442 } 12443 12444 ~SequencedSubexpression() { 12445 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 12446 // Add a new usage with usage kind UK_ModAsValue, and then restore 12447 // the previous usage with UK_ModAsSideEffect (thus clearing it if 12448 // the previous one was empty). 12449 UsageInfo &UI = Self.UsageMap[M.first]; 12450 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 12451 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 12452 SideEffectUsage = M.second; 12453 } 12454 Self.ModAsSideEffect = OldModAsSideEffect; 12455 } 12456 12457 SequenceChecker &Self; 12458 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12459 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12460 }; 12461 12462 /// RAII object wrapping the visitation of a subexpression which we might 12463 /// choose to evaluate as a constant. If any subexpression is evaluated and 12464 /// found to be non-constant, this allows us to suppress the evaluation of 12465 /// the outer expression. 12466 class EvaluationTracker { 12467 public: 12468 EvaluationTracker(SequenceChecker &Self) 12469 : Self(Self), Prev(Self.EvalTracker) { 12470 Self.EvalTracker = this; 12471 } 12472 12473 ~EvaluationTracker() { 12474 Self.EvalTracker = Prev; 12475 if (Prev) 12476 Prev->EvalOK &= EvalOK; 12477 } 12478 12479 bool evaluate(const Expr *E, bool &Result) { 12480 if (!EvalOK || E->isValueDependent()) 12481 return false; 12482 EvalOK = E->EvaluateAsBooleanCondition( 12483 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12484 return EvalOK; 12485 } 12486 12487 private: 12488 SequenceChecker &Self; 12489 EvaluationTracker *Prev; 12490 bool EvalOK = true; 12491 } *EvalTracker = nullptr; 12492 12493 /// Find the object which is produced by the specified expression, 12494 /// if any. 
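/// For example, with \p Mod set, '++n' and 'n = 0' both map to the
/// declaration of 'n', and a comma expression maps to the object of its
/// right-hand side; expressions we cannot analyze yield a null Object.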
12495 Object getObject(const Expr *E, bool Mod) const { 12496 E = E->IgnoreParenCasts(); 12497 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12498 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12499 return getObject(UO->getSubExpr(), Mod); 12500 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12501 if (BO->getOpcode() == BO_Comma) 12502 return getObject(BO->getRHS(), Mod); 12503 if (Mod && BO->isAssignmentOp()) 12504 return getObject(BO->getLHS(), Mod); 12505 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12506 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12507 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12508 return ME->getMemberDecl(); 12509 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12510 // FIXME: If this is a reference, map through to its value. 12511 return DRE->getDecl(); 12512 return nullptr; 12513 } 12514 12515 /// Note that an object \p O was modified or used by an expression 12516 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 12517 /// the object \p O as obtained via the \p UsageMap. 12518 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 12519 // Get the old usage for the given object and usage kind. 12520 Usage &U = UI.Uses[UK]; 12521 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) { 12522 // If we have a modification as side effect and are in a sequenced 12523 // subexpression, save the old Usage so that we can restore it later 12524 // in SequencedSubexpression::~SequencedSubexpression. 12525 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 12526 ModAsSideEffect->push_back(std::make_pair(O, U)); 12527 // Then record the new usage with the current sequencing region. 12528 U.UsageExpr = UsageExpr; 12529 U.Seq = Region; 12530 } 12531 } 12532 12533 /// Check whether a modification or use of an object \p O in an expression 12534 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is 12535 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap. 12536 /// \p IsModMod is true when we are checking for a mod-mod unsequenced 12537 /// usage and false we are checking for a mod-use unsequenced usage. 12538 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, 12539 UsageKind OtherKind, bool IsModMod) { 12540 if (UI.Diagnosed) 12541 return; 12542 12543 const Usage &U = UI.Uses[OtherKind]; 12544 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) 12545 return; 12546 12547 const Expr *Mod = U.UsageExpr; 12548 const Expr *ModOrUse = UsageExpr; 12549 if (OtherKind == UK_Use) 12550 std::swap(Mod, ModOrUse); 12551 12552 SemaRef.DiagRuntimeBehavior( 12553 Mod->getExprLoc(), {Mod, ModOrUse}, 12554 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 12555 : diag::warn_unsequenced_mod_use) 12556 << O << SourceRange(ModOrUse->getExprLoc())); 12557 UI.Diagnosed = true; 12558 } 12559 12560 // A note on note{Pre, Post}{Use, Mod}: 12561 // 12562 // (It helps to follow the algorithm with an expression such as 12563 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced 12564 // operations before C++17 and both are well-defined in C++17). 12565 // 12566 // When visiting a node which uses/modify an object we first call notePreUse 12567 // or notePreMod before visiting its sub-expression(s). 
At this point the 12568 // children of the current node have not yet been visited and so the eventual 12569 // uses/modifications resulting from the children of the current node have not 12570 // been recorded yet. 12571 // 12572 // We then visit the children of the current node. After that notePostUse or 12573 // notePostMod is called. These will 1) detect an unsequenced modification 12574 // as side effect (as in "k++ + k") and 2) add a new usage with the 12575 // appropriate usage kind. 12576 // 12577 // We also have to be careful that some operations sequence modifications as 12578 // side effects as well (for example: || or ,). To account for this we wrap 12579 // the visitation of such a sub-expression (for example: the LHS of || or ,) 12580 // with SequencedSubexpression. SequencedSubexpression is an RAII object 12581 // which records usages which are modifications as side effects, and then 12582 // downgrades them (or, more accurately, restores the previous usage which was 12583 // a modification as side effect) when exiting the scope of the sequenced 12584 // subexpression. 12585 12586 void notePreUse(Object O, const Expr *UseExpr) { 12587 UsageInfo &UI = UsageMap[O]; 12588 // Uses conflict with other modifications. 12589 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 12590 } 12591 12592 void notePostUse(Object O, const Expr *UseExpr) { 12593 UsageInfo &UI = UsageMap[O]; 12594 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 12595 /*IsModMod=*/false); 12596 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 12597 } 12598 12599 void notePreMod(Object O, const Expr *ModExpr) { 12600 UsageInfo &UI = UsageMap[O]; 12601 // Modifications conflict with other modifications and with uses. 12602 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 12603 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 12604 } 12605 12606 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 12607 UsageInfo &UI = UsageMap[O]; 12608 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 12609 /*IsModMod=*/true); 12610 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 12611 } 12612 12613 public: 12614 SequenceChecker(Sema &S, const Expr *E, 12615 SmallVectorImpl<const Expr *> &WorkList) 12616 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 12617 Visit(E); 12618 // Silence a -Wunused-private-field since WorkList is now unused. 12619 // TODO: Evaluate if it can be used, and if not remove it. 12620 (void)this->WorkList; 12621 } 12622 12623 void VisitStmt(const Stmt *S) { 12624 // Skip all statements which aren't expressions for now. 12625 } 12626 12627 void VisitExpr(const Expr *E) { 12628 // By default, just recurse to evaluated subexpressions.
12629 Base::VisitStmt(E); 12630 } 12631 12632 void VisitCastExpr(const CastExpr *E) { 12633 Object O = Object(); 12634 if (E->getCastKind() == CK_LValueToRValue) 12635 O = getObject(E->getSubExpr(), false); 12636 12637 if (O) 12638 notePreUse(O, E); 12639 VisitExpr(E); 12640 if (O) 12641 notePostUse(O, E); 12642 } 12643 12644 void VisitSequencedExpressions(const Expr *SequencedBefore, 12645 const Expr *SequencedAfter) { 12646 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 12647 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 12648 SequenceTree::Seq OldRegion = Region; 12649 12650 { 12651 SequencedSubexpression SeqBefore(*this); 12652 Region = BeforeRegion; 12653 Visit(SequencedBefore); 12654 } 12655 12656 Region = AfterRegion; 12657 Visit(SequencedAfter); 12658 12659 Region = OldRegion; 12660 12661 Tree.merge(BeforeRegion); 12662 Tree.merge(AfterRegion); 12663 } 12664 12665 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 12666 // C++17 [expr.sub]p1: 12667 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 12668 // expression E1 is sequenced before the expression E2. 12669 if (SemaRef.getLangOpts().CPlusPlus17) 12670 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 12671 else { 12672 Visit(ASE->getLHS()); 12673 Visit(ASE->getRHS()); 12674 } 12675 } 12676 12677 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 12678 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 12679 void VisitBinPtrMem(const BinaryOperator *BO) { 12680 // C++17 [expr.mptr.oper]p4: 12681 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 12682 // the expression E1 is sequenced before the expression E2. 12683 if (SemaRef.getLangOpts().CPlusPlus17) 12684 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12685 else { 12686 Visit(BO->getLHS()); 12687 Visit(BO->getRHS()); 12688 } 12689 } 12690 12691 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 12692 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 12693 void VisitBinShlShr(const BinaryOperator *BO) { 12694 // C++17 [expr.shift]p4: 12695 // The expression E1 is sequenced before the expression E2. 12696 if (SemaRef.getLangOpts().CPlusPlus17) 12697 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12698 else { 12699 Visit(BO->getLHS()); 12700 Visit(BO->getRHS()); 12701 } 12702 } 12703 12704 void VisitBinComma(const BinaryOperator *BO) { 12705 // C++11 [expr.comma]p1: 12706 // Every value computation and side effect associated with the left 12707 // expression is sequenced before every value computation and side 12708 // effect associated with the right expression. 12709 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 12710 } 12711 12712 void VisitBinAssign(const BinaryOperator *BO) { 12713 SequenceTree::Seq RHSRegion; 12714 SequenceTree::Seq LHSRegion; 12715 if (SemaRef.getLangOpts().CPlusPlus17) { 12716 RHSRegion = Tree.allocate(Region); 12717 LHSRegion = Tree.allocate(Region); 12718 } else { 12719 RHSRegion = Region; 12720 LHSRegion = Region; 12721 } 12722 SequenceTree::Seq OldRegion = Region; 12723 12724 // C++11 [expr.ass]p1: 12725 // [...] the assignment is sequenced after the value computation 12726 // of the right and left operands, [...] 12727 // 12728 // so check it before inspecting the operands and update the 12729 // map afterwards. 
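// For illustration: pre-C++17, 'k = k++' contains two unsequenced
// modifications of 'k' and is diagnosed; in C++17 the right operand is
// sequenced before the left operand, so no warning is emitted.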
12730 Object O = getObject(BO->getLHS(), /*Mod=*/true); 12731 if (O) 12732 notePreMod(O, BO); 12733 12734 if (SemaRef.getLangOpts().CPlusPlus17) { 12735 // C++17 [expr.ass]p1: 12736 // [...] The right operand is sequenced before the left operand. [...] 12737 { 12738 SequencedSubexpression SeqBefore(*this); 12739 Region = RHSRegion; 12740 Visit(BO->getRHS()); 12741 } 12742 12743 Region = LHSRegion; 12744 Visit(BO->getLHS()); 12745 12746 if (O && isa<CompoundAssignOperator>(BO)) 12747 notePostUse(O, BO); 12748 12749 } else { 12750 // C++11 does not specify any sequencing between the LHS and RHS. 12751 Region = LHSRegion; 12752 Visit(BO->getLHS()); 12753 12754 if (O && isa<CompoundAssignOperator>(BO)) 12755 notePostUse(O, BO); 12756 12757 Region = RHSRegion; 12758 Visit(BO->getRHS()); 12759 } 12760 12761 // C++11 [expr.ass]p1: 12762 // the assignment is sequenced [...] before the value computation of the 12763 // assignment expression. 12764 // C11 6.5.16/3 has no such rule. 12765 Region = OldRegion; 12766 if (O) 12767 notePostMod(O, BO, 12768 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12769 : UK_ModAsSideEffect); 12770 if (SemaRef.getLangOpts().CPlusPlus17) { 12771 Tree.merge(RHSRegion); 12772 Tree.merge(LHSRegion); 12773 } 12774 } 12775 12776 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 12777 VisitBinAssign(CAO); 12778 } 12779 12780 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12781 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 12782 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 12783 Object O = getObject(UO->getSubExpr(), true); 12784 if (!O) 12785 return VisitExpr(UO); 12786 12787 notePreMod(O, UO); 12788 Visit(UO->getSubExpr()); 12789 // C++11 [expr.pre.incr]p1: 12790 // the expression ++x is equivalent to x+=1 12791 notePostMod(O, UO, 12792 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 12793 : UK_ModAsSideEffect); 12794 } 12795 12796 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12797 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 12798 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 12799 Object O = getObject(UO->getSubExpr(), true); 12800 if (!O) 12801 return VisitExpr(UO); 12802 12803 notePreMod(O, UO); 12804 Visit(UO->getSubExpr()); 12805 notePostMod(O, UO, UK_ModAsSideEffect); 12806 } 12807 12808 void VisitBinLOr(const BinaryOperator *BO) { 12809 // C++11 [expr.log.or]p2: 12810 // If the second expression is evaluated, every value computation and 12811 // side effect associated with the first expression is sequenced before 12812 // every value computation and side effect associated with the 12813 // second expression. 12814 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 12815 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 12816 SequenceTree::Seq OldRegion = Region; 12817 12818 EvaluationTracker Eval(*this); 12819 { 12820 SequencedSubexpression Sequenced(*this); 12821 Region = LHSRegion; 12822 Visit(BO->getLHS()); 12823 } 12824 12825 // C++11 [expr.log.or]p1: 12826 // [...] the second operand is not evaluated if the first operand 12827 // evaluates to true. 
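// For illustration: if the LHS folds to true, as in
// 'sizeof(int) == 4 || k++ + k', the RHS is never evaluated, so we skip
// visiting it instead of warning about code that cannot execute.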
12828 bool EvalResult = false; 12829 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 12830 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 12831 if (ShouldVisitRHS) { 12832 Region = RHSRegion; 12833 Visit(BO->getRHS()); 12834 } 12835 12836 Region = OldRegion; 12837 Tree.merge(LHSRegion); 12838 Tree.merge(RHSRegion); 12839 } 12840 12841 void VisitBinLAnd(const BinaryOperator *BO) { 12842 // C++11 [expr.log.and]p2: 12843 // If the second expression is evaluated, every value computation and 12844 // side effect associated with the first expression is sequenced before 12845 // every value computation and side effect associated with the 12846 // second expression. 12847 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 12848 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 12849 SequenceTree::Seq OldRegion = Region; 12850 12851 EvaluationTracker Eval(*this); 12852 { 12853 SequencedSubexpression Sequenced(*this); 12854 Region = LHSRegion; 12855 Visit(BO->getLHS()); 12856 } 12857 12858 // C++11 [expr.log.and]p1: 12859 // [...] the second operand is not evaluated if the first operand is false. 12860 bool EvalResult = false; 12861 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 12862 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult); 12863 if (ShouldVisitRHS) { 12864 Region = RHSRegion; 12865 Visit(BO->getRHS()); 12866 } 12867 12868 Region = OldRegion; 12869 Tree.merge(LHSRegion); 12870 Tree.merge(RHSRegion); 12871 } 12872 12873 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) { 12874 // C++11 [expr.cond]p1: 12875 // [...] Every value computation and side effect associated with the first 12876 // expression is sequenced before every value computation and side effect 12877 // associated with the second or third expression. 12878 SequenceTree::Seq ConditionRegion = Tree.allocate(Region); 12879 12880 // No sequencing is specified between the true and false expression. 12881 // However since exactly one of both is going to be evaluated we can 12882 // consider them to be sequenced. This is needed to avoid warning on 12883 // something like "x ? y+= 1 : y += 2;" in the case where we will visit 12884 // both the true and false expressions because we can't evaluate x. 12885 // This will still allow us to detect an expression like (pre C++17) 12886 // "(x ? y += 1 : y += 2) = y". 12887 // 12888 // We don't wrap the visitation of the true and false expression with 12889 // SequencedSubexpression because we don't want to downgrade modifications 12890 // as side effect in the true and false expressions after the visition 12891 // is done. (for example in the expression "(x ? y++ : y++) + y" we should 12892 // not warn between the two "y++", but we should warn between the "y++" 12893 // and the "y". 12894 SequenceTree::Seq TrueRegion = Tree.allocate(Region); 12895 SequenceTree::Seq FalseRegion = Tree.allocate(Region); 12896 SequenceTree::Seq OldRegion = Region; 12897 12898 EvaluationTracker Eval(*this); 12899 { 12900 SequencedSubexpression Sequenced(*this); 12901 Region = ConditionRegion; 12902 Visit(CO->getCond()); 12903 } 12904 12905 // C++11 [expr.cond]p1: 12906 // [...] The first expression is contextually converted to bool (Clause 4). 12907 // It is evaluated and if it is true, the result of the conditional 12908 // expression is the value of the second expression, otherwise that of the 12909 // third expression. Only one of the second and third expressions is 12910 // evaluated. [...] 
12911 bool EvalResult = false; 12912 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 12913 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 12914 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 12915 if (ShouldVisitTrueExpr) { 12916 Region = TrueRegion; 12917 Visit(CO->getTrueExpr()); 12918 } 12919 if (ShouldVisitFalseExpr) { 12920 Region = FalseRegion; 12921 Visit(CO->getFalseExpr()); 12922 } 12923 12924 Region = OldRegion; 12925 Tree.merge(ConditionRegion); 12926 Tree.merge(TrueRegion); 12927 Tree.merge(FalseRegion); 12928 } 12929 12930 void VisitCallExpr(const CallExpr *CE) { 12931 // C++11 [intro.execution]p15: 12932 // When calling a function [...], every value computation and side effect 12933 // associated with any argument expression, or with the postfix expression 12934 // designating the called function, is sequenced before execution of every 12935 // expression or statement in the body of the function [and thus before 12936 // the value computation of its result]. 12937 SequencedSubexpression Sequenced(*this); 12938 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), 12939 [&] { Base::VisitCallExpr(CE); }); 12940 12941 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 12942 } 12943 12944 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 12945 // This is a call, so all subexpressions are sequenced before the result. 12946 SequencedSubexpression Sequenced(*this); 12947 12948 if (!CCE->isListInitialization()) 12949 return VisitExpr(CCE); 12950 12951 // In C++11, list initializations are sequenced. 12952 SmallVector<SequenceTree::Seq, 32> Elts; 12953 SequenceTree::Seq Parent = Region; 12954 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 12955 E = CCE->arg_end(); 12956 I != E; ++I) { 12957 Region = Tree.allocate(Parent); 12958 Elts.push_back(Region); 12959 Visit(*I); 12960 } 12961 12962 // Forget that the initializers are sequenced. 12963 Region = Parent; 12964 for (unsigned I = 0; I < Elts.size(); ++I) 12965 Tree.merge(Elts[I]); 12966 } 12967 12968 void VisitInitListExpr(const InitListExpr *ILE) { 12969 if (!SemaRef.getLangOpts().CPlusPlus11) 12970 return VisitExpr(ILE); 12971 12972 // In C++11, list initializations are sequenced. 12973 SmallVector<SequenceTree::Seq, 32> Elts; 12974 SequenceTree::Seq Parent = Region; 12975 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 12976 const Expr *E = ILE->getInit(I); 12977 if (!E) 12978 continue; 12979 Region = Tree.allocate(Parent); 12980 Elts.push_back(Region); 12981 Visit(E); 12982 } 12983 12984 // Forget that the initializers are sequenced. 
12985 Region = Parent; 12986 for (unsigned I = 0; I < Elts.size(); ++I) 12987 Tree.merge(Elts[I]); 12988 } 12989 }; 12990 12991 } // namespace 12992 12993 void Sema::CheckUnsequencedOperations(const Expr *E) { 12994 SmallVector<const Expr *, 8> WorkList; 12995 WorkList.push_back(E); 12996 while (!WorkList.empty()) { 12997 const Expr *Item = WorkList.pop_back_val(); 12998 SequenceChecker(*this, Item, WorkList); 12999 } 13000 } 13001 13002 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 13003 bool IsConstexpr) { 13004 llvm::SaveAndRestore<bool> ConstantContext( 13005 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 13006 CheckImplicitConversions(E, CheckLoc); 13007 if (!E->isInstantiationDependent()) 13008 CheckUnsequencedOperations(E); 13009 if (!IsConstexpr && !E->isValueDependent()) 13010 CheckForIntOverflow(E); 13011 DiagnoseMisalignedMembers(); 13012 } 13013 13014 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 13015 FieldDecl *BitField, 13016 Expr *Init) { 13017 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 13018 } 13019 13020 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 13021 SourceLocation Loc) { 13022 if (!PType->isVariablyModifiedType()) 13023 return; 13024 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 13025 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 13026 return; 13027 } 13028 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 13029 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 13030 return; 13031 } 13032 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 13033 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 13034 return; 13035 } 13036 13037 const ArrayType *AT = S.Context.getAsArrayType(PType); 13038 if (!AT) 13039 return; 13040 13041 if (AT->getSizeModifier() != ArrayType::Star) { 13042 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 13043 return; 13044 } 13045 13046 S.Diag(Loc, diag::err_array_star_in_function_definition); 13047 } 13048 13049 /// CheckParmsForFunctionDef - Check that the parameters of the given 13050 /// function are appropriate for the definition of a function. This 13051 /// takes care of any checks that cannot be performed on the 13052 /// declaration itself, e.g., that the types of each of the function 13053 /// parameters are complete. 13054 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 13055 bool CheckParameterNames) { 13056 bool HasInvalidParm = false; 13057 for (ParmVarDecl *Param : Parameters) { 13058 // C99 6.7.5.3p4: the parameters in a parameter type list in a 13059 // function declarator that is part of a function definition of 13060 // that function shall not have incomplete type. 13061 // 13062 // This is also C++ [dcl.fct]p6. 13063 if (!Param->isInvalidDecl() && 13064 RequireCompleteType(Param->getLocation(), Param->getType(), 13065 diag::err_typecheck_decl_incomplete_type)) { 13066 Param->setInvalidDecl(); 13067 HasInvalidParm = true; 13068 } 13069 13070 // C99 6.9.1p5: If the declarator includes a parameter type list, the 13071 // declaration of each parameter shall include an identifier. 13072 if (CheckParameterNames && Param->getIdentifier() == nullptr && 13073 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 13074 // Diagnose this as an extension in C17 and earlier. 
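// For illustration, a definition such as 'void f(int) { }' omits the
// parameter name; C2x allows this, while earlier standards require a name,
// hence the extension diagnostic below.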
13075 if (!getLangOpts().C2x) 13076 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 13077 } 13078 13079 // C99 6.7.5.3p12: 13080 // If the function declarator is not part of a definition of that 13081 // function, parameters may have incomplete type and may use the [*] 13082 // notation in their sequences of declarator specifiers to specify 13083 // variable length array types. 13084 QualType PType = Param->getOriginalType(); 13085 // FIXME: This diagnostic should point the '[*]' if source-location 13086 // information is added for it. 13087 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 13088 13089 // If the parameter is a c++ class type and it has to be destructed in the 13090 // callee function, declare the destructor so that it can be called by the 13091 // callee function. Do not perform any direct access check on the dtor here. 13092 if (!Param->isInvalidDecl()) { 13093 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 13094 if (!ClassDecl->isInvalidDecl() && 13095 !ClassDecl->hasIrrelevantDestructor() && 13096 !ClassDecl->isDependentContext() && 13097 ClassDecl->isParamDestroyedInCallee()) { 13098 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 13099 MarkFunctionReferenced(Param->getLocation(), Destructor); 13100 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 13101 } 13102 } 13103 } 13104 13105 // Parameters with the pass_object_size attribute only need to be marked 13106 // constant at function definitions. Because we lack information about 13107 // whether we're on a declaration or definition when we're instantiating the 13108 // attribute, we need to check for constness here. 13109 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 13110 if (!Param->getType().isConstQualified()) 13111 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 13112 << Attr->getSpelling() << 1; 13113 13114 // Check for parameter names shadowing fields from the class. 13115 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 13116 // The owning context for the parameter should be the function, but we 13117 // want to see if this function's declaration context is a record. 13118 DeclContext *DC = Param->getDeclContext(); 13119 if (DC && DC->isFunctionOrMethod()) { 13120 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 13121 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 13122 RD, /*DeclIsField*/ false); 13123 } 13124 } 13125 } 13126 13127 return HasInvalidParm; 13128 } 13129 13130 Optional<std::pair<CharUnits, CharUnits>> 13131 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 13132 13133 /// Compute the alignment and offset of the base class object given the 13134 /// derived-to-base cast expression and the alignment and offset of the derived 13135 /// class object. 13136 static std::pair<CharUnits, CharUnits> 13137 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 13138 CharUnits BaseAlignment, CharUnits Offset, 13139 ASTContext &Ctx) { 13140 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 13141 ++PathI) { 13142 const CXXBaseSpecifier *Base = *PathI; 13143 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 13144 if (Base->isVirtual()) { 13145 // The complete object may have a lower alignment than the non-virtual 13146 // alignment of the base, in which case the base may be misaligned. 
Choose 13147 // the smaller of the non-virtual alignment and BaseAlignment, which is a 13148 // conservative lower bound of the complete object alignment. 13149 CharUnits NonVirtualAlignment = 13150 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 13151 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 13152 Offset = CharUnits::Zero(); 13153 } else { 13154 const ASTRecordLayout &RL = 13155 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 13156 Offset += RL.getBaseClassOffset(BaseDecl); 13157 } 13158 DerivedType = Base->getType(); 13159 } 13160 13161 return std::make_pair(BaseAlignment, Offset); 13162 } 13163 13164 /// Compute the alignment and offset of a binary additive operator. 13165 static Optional<std::pair<CharUnits, CharUnits>> 13166 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 13167 bool IsSub, ASTContext &Ctx) { 13168 QualType PointeeType = PtrE->getType()->getPointeeType(); 13169 13170 if (!PointeeType->isConstantSizeType()) 13171 return llvm::None; 13172 13173 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 13174 13175 if (!P) 13176 return llvm::None; 13177 13178 llvm::APSInt IdxRes; 13179 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 13180 if (IntE->isIntegerConstantExpr(IdxRes, Ctx)) { 13181 CharUnits Offset = EltSize * IdxRes.getExtValue(); 13182 if (IsSub) 13183 Offset = -Offset; 13184 return std::make_pair(P->first, P->second + Offset); 13185 } 13186 13187 // If the integer expression isn't a constant expression, compute the lower 13188 // bound of the alignment using the alignment and offset of the pointer 13189 // expression and the element size. 13190 return std::make_pair( 13191 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 13192 CharUnits::Zero()); 13193 } 13194 13195 /// This helper function takes an lvalue expression and returns the alignment of 13196 /// a VarDecl and a constant offset from the VarDecl. 13197 Optional<std::pair<CharUnits, CharUnits>> 13198 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 13199 E = E->IgnoreParens(); 13200 switch (E->getStmtClass()) { 13201 default: 13202 break; 13203 case Stmt::CStyleCastExprClass: 13204 case Stmt::CXXStaticCastExprClass: 13205 case Stmt::ImplicitCastExprClass: { 13206 auto *CE = cast<CastExpr>(E); 13207 const Expr *From = CE->getSubExpr(); 13208 switch (CE->getCastKind()) { 13209 default: 13210 break; 13211 case CK_NoOp: 13212 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13213 case CK_UncheckedDerivedToBase: 13214 case CK_DerivedToBase: { 13215 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13216 if (!P) 13217 break; 13218 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 13219 P->second, Ctx); 13220 } 13221 } 13222 break; 13223 } 13224 case Stmt::ArraySubscriptExprClass: { 13225 auto *ASE = cast<ArraySubscriptExpr>(E); 13226 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 13227 false, Ctx); 13228 } 13229 case Stmt::DeclRefExprClass: { 13230 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 13231 // FIXME: If VD is captured by copy or is an escaping __block variable, 13232 // use the alignment of VD's type. 
13233 if (!VD->getType()->isReferenceType()) 13234 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 13235 if (VD->hasInit()) 13236 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 13237 } 13238 break; 13239 } 13240 case Stmt::MemberExprClass: { 13241 auto *ME = cast<MemberExpr>(E); 13242 if (ME->isArrow()) 13243 break; 13244 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 13245 if (!FD || FD->getType()->isReferenceType()) 13246 break; 13247 auto P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 13248 if (!P) 13249 break; 13250 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 13251 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 13252 return std::make_pair(P->first, 13253 P->second + CharUnits::fromQuantity(Offset)); 13254 } 13255 case Stmt::UnaryOperatorClass: { 13256 auto *UO = cast<UnaryOperator>(E); 13257 switch (UO->getOpcode()) { 13258 default: 13259 break; 13260 case UO_Deref: 13261 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 13262 } 13263 break; 13264 } 13265 case Stmt::BinaryOperatorClass: { 13266 auto *BO = cast<BinaryOperator>(E); 13267 auto Opcode = BO->getOpcode(); 13268 switch (Opcode) { 13269 default: 13270 break; 13271 case BO_Comma: 13272 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 13273 } 13274 break; 13275 } 13276 } 13277 return llvm::None; 13278 } 13279 13280 /// This helper function takes a pointer expression and returns the alignment of 13281 /// a VarDecl and a constant offset from the VarDecl. 13282 Optional<std::pair<CharUnits, CharUnits>> 13283 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 13284 E = E->IgnoreParens(); 13285 switch (E->getStmtClass()) { 13286 default: 13287 break; 13288 case Stmt::CStyleCastExprClass: 13289 case Stmt::CXXStaticCastExprClass: 13290 case Stmt::ImplicitCastExprClass: { 13291 auto *CE = cast<CastExpr>(E); 13292 const Expr *From = CE->getSubExpr(); 13293 switch (CE->getCastKind()) { 13294 default: 13295 break; 13296 case CK_NoOp: 13297 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13298 case CK_ArrayToPointerDecay: 13299 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13300 case CK_UncheckedDerivedToBase: 13301 case CK_DerivedToBase: { 13302 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13303 if (!P) 13304 break; 13305 return getDerivedToBaseAlignmentAndOffset( 13306 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 13307 } 13308 } 13309 break; 13310 } 13311 case Stmt::UnaryOperatorClass: { 13312 auto *UO = cast<UnaryOperator>(E); 13313 if (UO->getOpcode() == UO_AddrOf) 13314 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 13315 break; 13316 } 13317 case Stmt::BinaryOperatorClass: { 13318 auto *BO = cast<BinaryOperator>(E); 13319 auto Opcode = BO->getOpcode(); 13320 switch (Opcode) { 13321 default: 13322 break; 13323 case BO_Add: 13324 case BO_Sub: { 13325 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 13326 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 13327 std::swap(LHS, RHS); 13328 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 13329 Ctx); 13330 } 13331 case BO_Comma: 13332 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 13333 } 13334 break; 13335 } 13336 } 13337 return llvm::None; 13338 } 13339 13340 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 13341 // See if we can compute the alignment of a VarDecl and an offset from it. 
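// For illustration: for '&s.c', where 's' is a local struct variable, this
// yields the alignment of 's' together with the constant offset of the field
// 'c'; if no VarDecl can be found, we fall back to the pointee type's
// alignment below.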
13342 Optional<std::pair<CharUnits, CharUnits>> P = 13343 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 13344 13345 if (P) 13346 return P->first.alignmentAtOffset(P->second); 13347 13348 // If that failed, return the type's alignment. 13349 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 13350 } 13351 13352 /// CheckCastAlign - Implements -Wcast-align, which warns when a 13353 /// pointer cast increases the alignment requirements. 13354 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 13355 // This is actually a lot of work to potentially be doing on every 13356 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 13357 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 13358 return; 13359 13360 // Ignore dependent types. 13361 if (T->isDependentType() || Op->getType()->isDependentType()) 13362 return; 13363 13364 // Require that the destination be a pointer type. 13365 const PointerType *DestPtr = T->getAs<PointerType>(); 13366 if (!DestPtr) return; 13367 13368 // If the destination has alignment 1, we're done. 13369 QualType DestPointee = DestPtr->getPointeeType(); 13370 if (DestPointee->isIncompleteType()) return; 13371 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 13372 if (DestAlign.isOne()) return; 13373 13374 // Require that the source be a pointer type. 13375 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 13376 if (!SrcPtr) return; 13377 QualType SrcPointee = SrcPtr->getPointeeType(); 13378 13379 // Whitelist casts from cv void*. We already implicitly 13380 // whitelisted casts to cv void*, since they have alignment 1. 13381 // Also whitelist casts involving incomplete types, which implicitly 13382 // includes 'void'. 13383 if (SrcPointee->isIncompleteType()) return; 13384 13385 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 13386 13387 if (SrcAlign >= DestAlign) return; 13388 13389 Diag(TRange.getBegin(), diag::warn_cast_align) 13390 << Op->getType() << T 13391 << static_cast<unsigned>(SrcAlign.getQuantity()) 13392 << static_cast<unsigned>(DestAlign.getQuantity()) 13393 << TRange << Op->getSourceRange(); 13394 } 13395 13396 /// Check whether this array fits the idiom of a size-one tail padded 13397 /// array member of a struct. 13398 /// 13399 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 13400 /// commonly used to emulate flexible arrays in C89 code. 13401 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 13402 const NamedDecl *ND) { 13403 if (Size != 1 || !ND) return false; 13404 13405 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 13406 if (!FD) return false; 13407 13408 // Don't consider sizes resulting from macro expansions or template argument 13409 // substitution to form C89 tail-padded arrays. 13410 13411 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 13412 while (TInfo) { 13413 TypeLoc TL = TInfo->getTypeLoc(); 13414 // Look through typedefs. 
13415 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 13416 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 13417 TInfo = TDL->getTypeSourceInfo(); 13418 continue; 13419 } 13420 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 13421 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 13422 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 13423 return false; 13424 } 13425 break; 13426 } 13427 13428 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 13429 if (!RD) return false; 13430 if (RD->isUnion()) return false; 13431 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 13432 if (!CRD->isStandardLayout()) return false; 13433 } 13434 13435 // See if this is the last field decl in the record. 13436 const Decl *D = FD; 13437 while ((D = D->getNextDeclInContext())) 13438 if (isa<FieldDecl>(D)) 13439 return false; 13440 return true; 13441 } 13442 13443 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 13444 const ArraySubscriptExpr *ASE, 13445 bool AllowOnePastEnd, bool IndexNegated) { 13446 // Already diagnosed by the constant evaluator. 13447 if (isConstantEvaluated()) 13448 return; 13449 13450 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 13451 if (IndexExpr->isValueDependent()) 13452 return; 13453 13454 const Type *EffectiveType = 13455 BaseExpr->getType()->getPointeeOrArrayElementType(); 13456 BaseExpr = BaseExpr->IgnoreParenCasts(); 13457 const ConstantArrayType *ArrayTy = 13458 Context.getAsConstantArrayType(BaseExpr->getType()); 13459 13460 if (!ArrayTy) 13461 return; 13462 13463 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 13464 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 13465 return; 13466 13467 Expr::EvalResult Result; 13468 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 13469 return; 13470 13471 llvm::APSInt index = Result.Val.getInt(); 13472 if (IndexNegated) 13473 index = -index; 13474 13475 const NamedDecl *ND = nullptr; 13476 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 13477 ND = DRE->getDecl(); 13478 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 13479 ND = ME->getMemberDecl(); 13480 13481 if (index.isUnsigned() || !index.isNegative()) { 13482 // It is possible that the type of the base expression after 13483 // IgnoreParenCasts is incomplete, even though the type of the base 13484 // expression before IgnoreParenCasts is complete (see PR39746 for an 13485 // example). In this case we have no information about whether the array 13486 // access exceeds the array bounds. However we can still diagnose an array 13487 // access which precedes the array bounds. 
13488 if (BaseType->isIncompleteType()) 13489 return; 13490 13491 llvm::APInt size = ArrayTy->getSize(); 13492 if (!size.isStrictlyPositive()) 13493 return; 13494 13495 if (BaseType != EffectiveType) { 13496 // Make sure we're comparing apples to apples when comparing index to size 13497 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 13498 uint64_t array_typesize = Context.getTypeSize(BaseType); 13499 // Handle ptrarith_typesize being zero, such as when casting to void* 13500 if (!ptrarith_typesize) ptrarith_typesize = 1; 13501 if (ptrarith_typesize != array_typesize) { 13502 // There's a cast to a different size type involved 13503 uint64_t ratio = array_typesize / ptrarith_typesize; 13504 // TODO: Be smarter about handling cases where array_typesize is not a 13505 // multiple of ptrarith_typesize 13506 if (ptrarith_typesize * ratio == array_typesize) 13507 size *= llvm::APInt(size.getBitWidth(), ratio); 13508 } 13509 } 13510 13511 if (size.getBitWidth() > index.getBitWidth()) 13512 index = index.zext(size.getBitWidth()); 13513 else if (size.getBitWidth() < index.getBitWidth()) 13514 size = size.zext(index.getBitWidth()); 13515 13516 // For array subscripting the index must be less than size, but for pointer 13517 // arithmetic also allow the index (offset) to be equal to size since 13518 // computing the next address after the end of the array is legal and 13519 // commonly done e.g. in C++ iterators and range-based for loops. 13520 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 13521 return; 13522 13523 // Also don't warn for arrays of size 1 which are members of some 13524 // structure. These are often used to approximate flexible arrays in C89 13525 // code. 13526 if (IsTailPaddedMemberArray(*this, size, ND)) 13527 return; 13528 13529 // Suppress the warning if the subscript expression (as identified by the 13530 // ']' location) and the index expression are both from macro expansions 13531 // within a system header. 13532 if (ASE) { 13533 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 13534 ASE->getRBracketLoc()); 13535 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 13536 SourceLocation IndexLoc = 13537 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 13538 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 13539 return; 13540 } 13541 } 13542 13543 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 13544 if (ASE) 13545 DiagID = diag::warn_array_index_exceeds_bounds; 13546 13547 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 13548 PDiag(DiagID) << index.toString(10, true) 13549 << size.toString(10, true) 13550 << (unsigned)size.getLimitedValue(~0U) 13551 << IndexExpr->getSourceRange()); 13552 } else { 13553 unsigned DiagID = diag::warn_array_index_precedes_bounds; 13554 if (!ASE) { 13555 DiagID = diag::warn_ptr_arith_precedes_bounds; 13556 if (index.isNegative()) index = -index; 13557 } 13558 13559 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 13560 PDiag(DiagID) << index.toString(10, true) 13561 << IndexExpr->getSourceRange()); 13562 } 13563 13564 if (!ND) { 13565 // Try harder to find a NamedDecl to point at in the note. 
13566 while (const ArraySubscriptExpr *ASE = 13567 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 13568 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 13569 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 13570 ND = DRE->getDecl(); 13571 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 13572 ND = ME->getMemberDecl(); 13573 } 13574 13575 if (ND) 13576 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 13577 PDiag(diag::note_array_declared_here) 13578 << ND->getDeclName()); 13579 } 13580 13581 void Sema::CheckArrayAccess(const Expr *expr) { 13582 int AllowOnePastEnd = 0; 13583 while (expr) { 13584 expr = expr->IgnoreParenImpCasts(); 13585 switch (expr->getStmtClass()) { 13586 case Stmt::ArraySubscriptExprClass: { 13587 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 13588 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 13589 AllowOnePastEnd > 0); 13590 expr = ASE->getBase(); 13591 break; 13592 } 13593 case Stmt::MemberExprClass: { 13594 expr = cast<MemberExpr>(expr)->getBase(); 13595 break; 13596 } 13597 case Stmt::OMPArraySectionExprClass: { 13598 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 13599 if (ASE->getLowerBound()) 13600 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 13601 /*ASE=*/nullptr, AllowOnePastEnd > 0); 13602 return; 13603 } 13604 case Stmt::UnaryOperatorClass: { 13605 // Only unwrap the * and & unary operators 13606 const UnaryOperator *UO = cast<UnaryOperator>(expr); 13607 expr = UO->getSubExpr(); 13608 switch (UO->getOpcode()) { 13609 case UO_AddrOf: 13610 AllowOnePastEnd++; 13611 break; 13612 case UO_Deref: 13613 AllowOnePastEnd--; 13614 break; 13615 default: 13616 return; 13617 } 13618 break; 13619 } 13620 case Stmt::ConditionalOperatorClass: { 13621 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 13622 if (const Expr *lhs = cond->getLHS()) 13623 CheckArrayAccess(lhs); 13624 if (const Expr *rhs = cond->getRHS()) 13625 CheckArrayAccess(rhs); 13626 return; 13627 } 13628 case Stmt::CXXOperatorCallExprClass: { 13629 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 13630 for (const auto *Arg : OCE->arguments()) 13631 CheckArrayAccess(Arg); 13632 return; 13633 } 13634 default: 13635 return; 13636 } 13637 } 13638 } 13639 13640 //===--- CHECK: Objective-C retain cycles ----------------------------------// 13641 13642 namespace { 13643 13644 struct RetainCycleOwner { 13645 VarDecl *Variable = nullptr; 13646 SourceRange Range; 13647 SourceLocation Loc; 13648 bool Indirect = false; 13649 13650 RetainCycleOwner() = default; 13651 13652 void setLocsFrom(Expr *e) { 13653 Loc = e->getExprLoc(); 13654 Range = e->getSourceRange(); 13655 } 13656 }; 13657 13658 } // namespace 13659 13660 /// Consider whether capturing the given variable can possibly lead to 13661 /// a retain cycle. 13662 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 13663 // In ARC, it's captured strongly iff the variable has __strong 13664 // lifetime. In MRR, it's captured strongly if the variable is 13665 // __block and has an appropriate type. 
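  // For instance (illustrative), under ARC a local written as
  //   MyController *strongSelf = self;
  // has __strong lifetime and is a candidate owner here, whereas
  //   __weak MyController *weakSelf = self;
  // is not.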
13666 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 13667 return false; 13668 13669 owner.Variable = var; 13670 if (ref) 13671 owner.setLocsFrom(ref); 13672 return true; 13673 } 13674 13675 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 13676 while (true) { 13677 e = e->IgnoreParens(); 13678 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 13679 switch (cast->getCastKind()) { 13680 case CK_BitCast: 13681 case CK_LValueBitCast: 13682 case CK_LValueToRValue: 13683 case CK_ARCReclaimReturnedObject: 13684 e = cast->getSubExpr(); 13685 continue; 13686 13687 default: 13688 return false; 13689 } 13690 } 13691 13692 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 13693 ObjCIvarDecl *ivar = ref->getDecl(); 13694 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 13695 return false; 13696 13697 // Try to find a retain cycle in the base. 13698 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 13699 return false; 13700 13701 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 13702 owner.Indirect = true; 13703 return true; 13704 } 13705 13706 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 13707 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 13708 if (!var) return false; 13709 return considerVariable(var, ref, owner); 13710 } 13711 13712 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 13713 if (member->isArrow()) return false; 13714 13715 // Don't count this as an indirect ownership. 13716 e = member->getBase(); 13717 continue; 13718 } 13719 13720 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 13721 // Only pay attention to pseudo-objects on property references. 13722 ObjCPropertyRefExpr *pre 13723 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 13724 ->IgnoreParens()); 13725 if (!pre) return false; 13726 if (pre->isImplicitProperty()) return false; 13727 ObjCPropertyDecl *property = pre->getExplicitProperty(); 13728 if (!property->isRetaining() && 13729 !(property->getPropertyIvarDecl() && 13730 property->getPropertyIvarDecl()->getType() 13731 .getObjCLifetime() == Qualifiers::OCL_Strong)) 13732 return false; 13733 13734 owner.Indirect = true; 13735 if (pre->isSuperReceiver()) { 13736 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 13737 if (!owner.Variable) 13738 return false; 13739 owner.Loc = pre->getLocation(); 13740 owner.Range = pre->getSourceRange(); 13741 return true; 13742 } 13743 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 13744 ->getSourceExpr()); 13745 continue; 13746 } 13747 13748 // Array ivars? 
13749 13750 return false; 13751 } 13752 } 13753 13754 namespace { 13755 13756 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 13757 ASTContext &Context; 13758 VarDecl *Variable; 13759 Expr *Capturer = nullptr; 13760 bool VarWillBeReased = false; 13761 13762 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 13763 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 13764 Context(Context), Variable(variable) {} 13765 13766 void VisitDeclRefExpr(DeclRefExpr *ref) { 13767 if (ref->getDecl() == Variable && !Capturer) 13768 Capturer = ref; 13769 } 13770 13771 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 13772 if (Capturer) return; 13773 Visit(ref->getBase()); 13774 if (Capturer && ref->isFreeIvar()) 13775 Capturer = ref; 13776 } 13777 13778 void VisitBlockExpr(BlockExpr *block) { 13779 // Look inside nested blocks 13780 if (block->getBlockDecl()->capturesVariable(Variable)) 13781 Visit(block->getBlockDecl()->getBody()); 13782 } 13783 13784 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 13785 if (Capturer) return; 13786 if (OVE->getSourceExpr()) 13787 Visit(OVE->getSourceExpr()); 13788 } 13789 13790 void VisitBinaryOperator(BinaryOperator *BinOp) { 13791 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 13792 return; 13793 Expr *LHS = BinOp->getLHS(); 13794 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 13795 if (DRE->getDecl() != Variable) 13796 return; 13797 if (Expr *RHS = BinOp->getRHS()) { 13798 RHS = RHS->IgnoreParenCasts(); 13799 llvm::APSInt Value; 13800 VarWillBeReased = 13801 (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0); 13802 } 13803 } 13804 } 13805 }; 13806 13807 } // namespace 13808 13809 /// Check whether the given argument is a block which captures a 13810 /// variable. 13811 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 13812 assert(owner.Variable && owner.Loc.isValid()); 13813 13814 e = e->IgnoreParenCasts(); 13815 13816 // Look through [^{...} copy] and Block_copy(^{...}). 13817 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 13818 Selector Cmd = ME->getSelector(); 13819 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 13820 e = ME->getInstanceReceiver(); 13821 if (!e) 13822 return nullptr; 13823 e = e->IgnoreParenCasts(); 13824 } 13825 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 13826 if (CE->getNumArgs() == 1) { 13827 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 13828 if (Fn) { 13829 const IdentifierInfo *FnI = Fn->getIdentifier(); 13830 if (FnI && FnI->isStr("_Block_copy")) { 13831 e = CE->getArg(0)->IgnoreParenCasts(); 13832 } 13833 } 13834 } 13835 } 13836 13837 BlockExpr *block = dyn_cast<BlockExpr>(e); 13838 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 13839 return nullptr; 13840 13841 FindCaptureVisitor visitor(S.Context, owner.Variable); 13842 visitor.Visit(block->getBlockDecl()->getBody()); 13843 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 13844 } 13845 13846 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 13847 RetainCycleOwner &owner) { 13848 assert(capturer); 13849 assert(owner.Variable && owner.Loc.isValid()); 13850 13851 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 13852 << owner.Variable << capturer->getSourceRange(); 13853 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 13854 << owner.Indirect << owner.Range; 13855 } 13856 13857 /// Check for a keyword selector that starts with the word 'add' or 13858 /// 'set'. 13859 static bool isSetterLikeSelector(Selector sel) { 13860 if (sel.isUnarySelector()) return false; 13861 13862 StringRef str = sel.getNameForSlot(0); 13863 while (!str.empty() && str.front() == '_') str = str.substr(1); 13864 if (str.startswith("set")) 13865 str = str.substr(3); 13866 else if (str.startswith("add")) { 13867 // Specially whitelist 'addOperationWithBlock:'. 13868 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 13869 return false; 13870 str = str.substr(3); 13871 } 13872 else 13873 return false; 13874 13875 if (str.empty()) return true; 13876 return !isLowercase(str.front()); 13877 } 13878 13879 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 13880 ObjCMessageExpr *Message) { 13881 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 13882 Message->getReceiverInterface(), 13883 NSAPI::ClassId_NSMutableArray); 13884 if (!IsMutableArray) { 13885 return None; 13886 } 13887 13888 Selector Sel = Message->getSelector(); 13889 13890 Optional<NSAPI::NSArrayMethodKind> MKOpt = 13891 S.NSAPIObj->getNSArrayMethodKind(Sel); 13892 if (!MKOpt) { 13893 return None; 13894 } 13895 13896 NSAPI::NSArrayMethodKind MK = *MKOpt; 13897 13898 switch (MK) { 13899 case NSAPI::NSMutableArr_addObject: 13900 case NSAPI::NSMutableArr_insertObjectAtIndex: 13901 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 13902 return 0; 13903 case NSAPI::NSMutableArr_replaceObjectAtIndex: 13904 return 1; 13905 13906 default: 13907 return None; 13908 } 13909 13910 return None; 13911 } 13912 13913 static 13914 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 13915 ObjCMessageExpr *Message) { 13916 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 13917 Message->getReceiverInterface(), 13918 NSAPI::ClassId_NSMutableDictionary); 13919 if (!IsMutableDictionary) { 13920 return None; 13921 } 13922 13923 Selector Sel = Message->getSelector(); 13924 13925 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 13926 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 13927 if (!MKOpt) { 13928 return None; 13929 } 13930 13931 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 13932 13933 switch (MK) { 13934 case NSAPI::NSMutableDict_setObjectForKey: 13935 case NSAPI::NSMutableDict_setValueForKey: 13936 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 13937 return 0; 13938 13939 default: 13940 return None; 13941 } 13942 13943 return None; 13944 } 13945 13946 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 13947 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 13948 Message->getReceiverInterface(), 13949 NSAPI::ClassId_NSMutableSet); 13950 13951 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 13952 Message->getReceiverInterface(), 13953 NSAPI::ClassId_NSMutableOrderedSet); 13954 if (!IsMutableSet && !IsMutableOrderedSet) { 13955 return None; 13956 } 13957 13958 Selector Sel = Message->getSelector(); 13959 13960 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 13961 if 
(!MKOpt) { 13962 return None; 13963 } 13964 13965 NSAPI::NSSetMethodKind MK = *MKOpt; 13966 13967 switch (MK) { 13968 case NSAPI::NSMutableSet_addObject: 13969 case NSAPI::NSOrderedSet_setObjectAtIndex: 13970 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 13971 case NSAPI::NSOrderedSet_insertObjectAtIndex: 13972 return 0; 13973 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 13974 return 1; 13975 } 13976 13977 return None; 13978 } 13979 13980 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 13981 if (!Message->isInstanceMessage()) { 13982 return; 13983 } 13984 13985 Optional<int> ArgOpt; 13986 13987 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 13988 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 13989 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 13990 return; 13991 } 13992 13993 int ArgIndex = *ArgOpt; 13994 13995 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 13996 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 13997 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 13998 } 13999 14000 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 14001 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14002 if (ArgRE->isObjCSelfExpr()) { 14003 Diag(Message->getSourceRange().getBegin(), 14004 diag::warn_objc_circular_container) 14005 << ArgRE->getDecl() << StringRef("'super'"); 14006 } 14007 } 14008 } else { 14009 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 14010 14011 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 14012 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 14013 } 14014 14015 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 14016 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14017 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 14018 ValueDecl *Decl = ReceiverRE->getDecl(); 14019 Diag(Message->getSourceRange().getBegin(), 14020 diag::warn_objc_circular_container) 14021 << Decl << Decl; 14022 if (!ArgRE->isObjCSelfExpr()) { 14023 Diag(Decl->getLocation(), 14024 diag::note_objc_circular_container_declared_here) 14025 << Decl; 14026 } 14027 } 14028 } 14029 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 14030 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 14031 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 14032 ObjCIvarDecl *Decl = IvarRE->getDecl(); 14033 Diag(Message->getSourceRange().getBegin(), 14034 diag::warn_objc_circular_container) 14035 << Decl << Decl; 14036 Diag(Decl->getLocation(), 14037 diag::note_objc_circular_container_declared_here) 14038 << Decl; 14039 } 14040 } 14041 } 14042 } 14043 } 14044 14045 /// Check a message send to see if it's likely to cause a retain cycle. 14046 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 14047 // Only check instance methods whose selector looks like a setter. 14048 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 14049 return; 14050 14051 // Try to find a variable that the receiver is strongly owned by. 
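  // E.g. (illustrative): in
  //
  //   [operation setCompletionBlock:^{ [operation finish]; }];
  //
  // the receiver is the local variable 'operation', which becomes the owner
  // considered below.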
14052 RetainCycleOwner owner; 14053 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 14054 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 14055 return; 14056 } else { 14057 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 14058 owner.Variable = getCurMethodDecl()->getSelfDecl(); 14059 owner.Loc = msg->getSuperLoc(); 14060 owner.Range = msg->getSuperLoc(); 14061 } 14062 14063 // Check whether the receiver is captured by any of the arguments. 14064 const ObjCMethodDecl *MD = msg->getMethodDecl(); 14065 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 14066 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 14067 // noescape blocks should not be retained by the method. 14068 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 14069 continue; 14070 return diagnoseRetainCycle(*this, capturer, owner); 14071 } 14072 } 14073 } 14074 14075 /// Check a property assign to see if it's likely to cause a retain cycle. 14076 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 14077 RetainCycleOwner owner; 14078 if (!findRetainCycleOwner(*this, receiver, owner)) 14079 return; 14080 14081 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 14082 diagnoseRetainCycle(*this, capturer, owner); 14083 } 14084 14085 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 14086 RetainCycleOwner Owner; 14087 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 14088 return; 14089 14090 // Because we don't have an expression for the variable, we have to set the 14091 // location explicitly here. 14092 Owner.Loc = Var->getLocation(); 14093 Owner.Range = Var->getSourceRange(); 14094 14095 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 14096 diagnoseRetainCycle(*this, Capturer, Owner); 14097 } 14098 14099 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 14100 Expr *RHS, bool isProperty) { 14101 // Check if RHS is an Objective-C object literal, which also can get 14102 // immediately zapped in a weak reference. Note that we explicitly 14103 // allow ObjCStringLiterals, since those are designed to never really die. 14104 RHS = RHS->IgnoreParenImpCasts(); 14105 14106 // This enum needs to match with the 'select' in 14107 // warn_objc_arc_literal_assign (off-by-1). 14108 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 14109 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 14110 return false; 14111 14112 S.Diag(Loc, diag::warn_arc_literal_assign) 14113 << (unsigned) Kind 14114 << (isProperty ? 0 : 1) 14115 << RHS->getSourceRange(); 14116 14117 return true; 14118 } 14119 14120 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 14121 Qualifiers::ObjCLifetime LT, 14122 Expr *RHS, bool isProperty) { 14123 // Strip off any implicit cast added to get to the one ARC-specific. 14124 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14125 if (cast->getCastKind() == CK_ARCConsumeObject) { 14126 S.Diag(Loc, diag::warn_arc_retained_assign) 14127 << (LT == Qualifiers::OCL_ExplicitNone) 14128 << (isProperty ? 
0 : 1) 14129 << RHS->getSourceRange(); 14130 return true; 14131 } 14132 RHS = cast->getSubExpr(); 14133 } 14134 14135 if (LT == Qualifiers::OCL_Weak && 14136 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 14137 return true; 14138 14139 return false; 14140 } 14141 14142 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 14143 QualType LHS, Expr *RHS) { 14144 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 14145 14146 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 14147 return false; 14148 14149 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 14150 return true; 14151 14152 return false; 14153 } 14154 14155 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 14156 Expr *LHS, Expr *RHS) { 14157 QualType LHSType; 14158 // PropertyRef on LHS type need be directly obtained from 14159 // its declaration as it has a PseudoType. 14160 ObjCPropertyRefExpr *PRE 14161 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 14162 if (PRE && !PRE->isImplicitProperty()) { 14163 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14164 if (PD) 14165 LHSType = PD->getType(); 14166 } 14167 14168 if (LHSType.isNull()) 14169 LHSType = LHS->getType(); 14170 14171 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 14172 14173 if (LT == Qualifiers::OCL_Weak) { 14174 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 14175 getCurFunction()->markSafeWeakUse(LHS); 14176 } 14177 14178 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 14179 return; 14180 14181 // FIXME. Check for other life times. 14182 if (LT != Qualifiers::OCL_None) 14183 return; 14184 14185 if (PRE) { 14186 if (PRE->isImplicitProperty()) 14187 return; 14188 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14189 if (!PD) 14190 return; 14191 14192 unsigned Attributes = PD->getPropertyAttributes(); 14193 if (Attributes & ObjCPropertyAttribute::kind_assign) { 14194 // when 'assign' attribute was not explicitly specified 14195 // by user, ignore it and rely on property type itself 14196 // for lifetime info. 14197 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 14198 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 14199 LHSType->isObjCRetainableType()) 14200 return; 14201 14202 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14203 if (cast->getCastKind() == CK_ARCConsumeObject) { 14204 Diag(Loc, diag::warn_arc_retained_property_assign) 14205 << RHS->getSourceRange(); 14206 return; 14207 } 14208 RHS = cast->getSubExpr(); 14209 } 14210 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 14211 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 14212 return; 14213 } 14214 } 14215 } 14216 14217 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 14218 14219 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 14220 SourceLocation StmtLoc, 14221 const NullStmt *Body) { 14222 // Do not warn if the body is a macro that expands to nothing, e.g: 14223 // 14224 // #define CALL(x) 14225 // if (condition) 14226 // CALL(0); 14227 if (Body->hasLeadingEmptyMacro()) 14228 return false; 14229 14230 // Get line numbers of statement and body. 
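  // E.g. (illustrative): in 'while (skipWhitespace(p));' the ';' shares a
  // line with the statement; written as
  //   while (skipWhitespace(p))
  //     ;
  // it does not, so only the first form satisfies the same-line check below.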
14231 bool StmtLineInvalid; 14232 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 14233 &StmtLineInvalid); 14234 if (StmtLineInvalid) 14235 return false; 14236 14237 bool BodyLineInvalid; 14238 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 14239 &BodyLineInvalid); 14240 if (BodyLineInvalid) 14241 return false; 14242 14243 // Warn if null statement and body are on the same line. 14244 if (StmtLine != BodyLine) 14245 return false; 14246 14247 return true; 14248 } 14249 14250 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 14251 const Stmt *Body, 14252 unsigned DiagID) { 14253 // Since this is a syntactic check, don't emit diagnostic for template 14254 // instantiations, this just adds noise. 14255 if (CurrentInstantiationScope) 14256 return; 14257 14258 // The body should be a null statement. 14259 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14260 if (!NBody) 14261 return; 14262 14263 // Do the usual checks. 14264 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14265 return; 14266 14267 Diag(NBody->getSemiLoc(), DiagID); 14268 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14269 } 14270 14271 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 14272 const Stmt *PossibleBody) { 14273 assert(!CurrentInstantiationScope); // Ensured by caller 14274 14275 SourceLocation StmtLoc; 14276 const Stmt *Body; 14277 unsigned DiagID; 14278 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 14279 StmtLoc = FS->getRParenLoc(); 14280 Body = FS->getBody(); 14281 DiagID = diag::warn_empty_for_body; 14282 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 14283 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 14284 Body = WS->getBody(); 14285 DiagID = diag::warn_empty_while_body; 14286 } else 14287 return; // Neither `for' nor `while'. 14288 14289 // The body should be a null statement. 14290 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14291 if (!NBody) 14292 return; 14293 14294 // Skip expensive checks if diagnostic is disabled. 14295 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 14296 return; 14297 14298 // Do the usual checks. 14299 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14300 return; 14301 14302 // `for(...);' and `while(...);' are popular idioms, so in order to keep 14303 // noise level low, emit diagnostics only if for/while is followed by a 14304 // CompoundStmt, e.g.: 14305 // for (int i = 0; i < n; i++); 14306 // { 14307 // a(i); 14308 // } 14309 // or if for/while is followed by a statement with more indentation 14310 // than for/while itself: 14311 // for (int i = 0; i < n; i++); 14312 // a(i); 14313 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 14314 if (!ProbableTypo) { 14315 bool BodyColInvalid; 14316 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 14317 PossibleBody->getBeginLoc(), &BodyColInvalid); 14318 if (BodyColInvalid) 14319 return; 14320 14321 bool StmtColInvalid; 14322 unsigned StmtCol = 14323 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 14324 if (StmtColInvalid) 14325 return; 14326 14327 if (BodyCol > StmtCol) 14328 ProbableTypo = true; 14329 } 14330 14331 if (ProbableTypo) { 14332 Diag(NBody->getSemiLoc(), DiagID); 14333 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14334 } 14335 } 14336 14337 //===--- CHECK: Warn on self move with std::move. -------------------------===// 14338 14339 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
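///
/// A typical instance (illustrative):
/// \code
///   std::vector<int> V = makeVector(); // 'makeVector' is hypothetical
///   V = std::move(V);                  // triggers -Wself-move
/// \endcode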
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression.
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move.
  if (!CE->isCallToStdMove())
    return;

  // Get the argument from std::move.
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExprs: check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  // Member variables require a different approach to check for self moves.
  // MemberExprs are the same if every nested MemberExpr refers to the same
  // Decl and the base Exprs are DeclRefExprs with the same Decl, or the
  // base Exprs are CXXThisExprs.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
}

//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  //   Two enumeration types are layout-compatible if they have the same
  //   underlying type.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}

/// Check if two fields are layout-compatible.
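/// For example (illustrative), the bit-fields 'int a : 3;' and 'int b : 3;'
/// are layout-compatible, while 'int a : 3;' and 'int b : 4;' are not,
/// because the widths differ.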
14438 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 14439 FieldDecl *Field2) { 14440 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 14441 return false; 14442 14443 if (Field1->isBitField() != Field2->isBitField()) 14444 return false; 14445 14446 if (Field1->isBitField()) { 14447 // Make sure that the bit-fields are the same length. 14448 unsigned Bits1 = Field1->getBitWidthValue(C); 14449 unsigned Bits2 = Field2->getBitWidthValue(C); 14450 14451 if (Bits1 != Bits2) 14452 return false; 14453 } 14454 14455 return true; 14456 } 14457 14458 /// Check if two standard-layout structs are layout-compatible. 14459 /// (C++11 [class.mem] p17) 14460 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 14461 RecordDecl *RD2) { 14462 // If both records are C++ classes, check that base classes match. 14463 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 14464 // If one of records is a CXXRecordDecl we are in C++ mode, 14465 // thus the other one is a CXXRecordDecl, too. 14466 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 14467 // Check number of base classes. 14468 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 14469 return false; 14470 14471 // Check the base classes. 14472 for (CXXRecordDecl::base_class_const_iterator 14473 Base1 = D1CXX->bases_begin(), 14474 BaseEnd1 = D1CXX->bases_end(), 14475 Base2 = D2CXX->bases_begin(); 14476 Base1 != BaseEnd1; 14477 ++Base1, ++Base2) { 14478 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 14479 return false; 14480 } 14481 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 14482 // If only RD2 is a C++ class, it should have zero base classes. 14483 if (D2CXX->getNumBases() > 0) 14484 return false; 14485 } 14486 14487 // Check the fields. 14488 RecordDecl::field_iterator Field2 = RD2->field_begin(), 14489 Field2End = RD2->field_end(), 14490 Field1 = RD1->field_begin(), 14491 Field1End = RD1->field_end(); 14492 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 14493 if (!isLayoutCompatible(C, *Field1, *Field2)) 14494 return false; 14495 } 14496 if (Field1 != Field1End || Field2 != Field2End) 14497 return false; 14498 14499 return true; 14500 } 14501 14502 /// Check if two standard-layout unions are layout-compatible. 14503 /// (C++11 [class.mem] p18) 14504 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 14505 RecordDecl *RD2) { 14506 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 14507 for (auto *Field2 : RD2->fields()) 14508 UnmatchedFields.insert(Field2); 14509 14510 for (auto *Field1 : RD1->fields()) { 14511 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 14512 I = UnmatchedFields.begin(), 14513 E = UnmatchedFields.end(); 14514 14515 for ( ; I != E; ++I) { 14516 if (isLayoutCompatible(C, Field1, *I)) { 14517 bool Result = UnmatchedFields.erase(*I); 14518 (void) Result; 14519 assert(Result); 14520 break; 14521 } 14522 } 14523 if (I == E) 14524 return false; 14525 } 14526 14527 return UnmatchedFields.empty(); 14528 } 14529 14530 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 14531 RecordDecl *RD2) { 14532 if (RD1->isUnion() != RD2->isUnion()) 14533 return false; 14534 14535 if (RD1->isUnion()) 14536 return isLayoutCompatibleUnion(C, RD1, RD2); 14537 else 14538 return isLayoutCompatibleStruct(C, RD1, RD2); 14539 } 14540 14541 /// Check if two types are layout-compatible in C++11 sense. 
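/// For instance (illustrative), the standard-layout structs
/// \code
///   struct A { int x; char y; };
///   struct B { int m; char n; };
/// \endcode
/// are layout-compatible, while 'struct C { char c; int n; };' is not
/// layout-compatible with either of them.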
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression, find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in the user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated Whether the evaluation should be performed in
/// a constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
                            const ValueDecl **VD, uint64_t *MagicValue,
                            bool isConstantEvaluated) {
  while (true) {
    if (!TypeExpr)
      return false;

    TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();

    switch (TypeExpr->getStmtClass()) {
    case Stmt::UnaryOperatorClass: {
      const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
      if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
        TypeExpr = UO->getSubExpr();
        continue;
      }
      return false;
    }

    case Stmt::DeclRefExprClass: {
      const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
      *VD = DRE->getDecl();
      return true;
    }

    case Stmt::IntegerLiteralClass: {
      const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
      llvm::APInt MagicValueAPInt = IL->getValue();
      if (MagicValueAPInt.getActiveBits() <= 64) {
        *MagicValue = MagicValueAPInt.getZExtValue();
        return true;
      } else
        return false;
    }

    case Stmt::BinaryConditionalOperatorClass:
    case Stmt::ConditionalOperatorClass: {
      const AbstractConditionalOperator *ACO =
          cast<AbstractConditionalOperator>(TypeExpr);
      bool Result;
      if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
                                                     isConstantEvaluated)) {
        if (Result)
          TypeExpr = ACO->getTrueExpr();
        else
          TypeExpr = ACO->getFalseExpr();
        continue;
      }
      return false;
    }

    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
      if (BO->getOpcode() == BO_Comma) {
        TypeExpr = BO->getRHS();
        continue;
      }
      return false;
    }

    default:
      return false;
    }
  }
}

/// Retrieve the C type corresponding to type tag TypeExpr.
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated Whether the evaluation should be performed in
/// a constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
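  // For a declaration such as (an illustrative sketch, not from this file)
  //
  //   void mpi_send(void *buf, int count, int datatype)
  //       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  //
  // the 'type_tag' argument is 'datatype' and the annotated pointer argument
  // is 'buf'.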
14753 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 14754 if (TypeTagIdxAST >= ExprArgs.size()) { 14755 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 14756 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 14757 return; 14758 } 14759 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 14760 bool FoundWrongKind; 14761 TypeTagData TypeInfo; 14762 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 14763 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 14764 TypeInfo, isConstantEvaluated())) { 14765 if (FoundWrongKind) 14766 Diag(TypeTagExpr->getExprLoc(), 14767 diag::warn_type_tag_for_datatype_wrong_kind) 14768 << TypeTagExpr->getSourceRange(); 14769 return; 14770 } 14771 14772 // Retrieve the argument representing the 'arg_idx'. 14773 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 14774 if (ArgumentIdxAST >= ExprArgs.size()) { 14775 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 14776 << 1 << Attr->getArgumentIdx().getSourceIndex(); 14777 return; 14778 } 14779 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 14780 if (IsPointerAttr) { 14781 // Skip implicit cast of pointer to `void *' (as a function argument). 14782 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 14783 if (ICE->getType()->isVoidPointerType() && 14784 ICE->getCastKind() == CK_BitCast) 14785 ArgumentExpr = ICE->getSubExpr(); 14786 } 14787 QualType ArgumentType = ArgumentExpr->getType(); 14788 14789 // Passing a `void*' pointer shouldn't trigger a warning. 14790 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 14791 return; 14792 14793 if (TypeInfo.MustBeNull) { 14794 // Type tag with matching void type requires a null pointer. 14795 if (!ArgumentExpr->isNullPointerConstant(Context, 14796 Expr::NPC_ValueDependentIsNotNull)) { 14797 Diag(ArgumentExpr->getExprLoc(), 14798 diag::warn_type_safety_null_pointer_required) 14799 << ArgumentKind->getName() 14800 << ArgumentExpr->getSourceRange() 14801 << TypeTagExpr->getSourceRange(); 14802 } 14803 return; 14804 } 14805 14806 QualType RequiredType = TypeInfo.Type; 14807 if (IsPointerAttr) 14808 RequiredType = Context.getPointerType(RequiredType); 14809 14810 bool mismatch = false; 14811 if (!TypeInfo.LayoutCompatible) { 14812 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 14813 14814 // C++11 [basic.fundamental] p1: 14815 // Plain char, signed char, and unsigned char are three distinct types. 14816 // 14817 // But we treat plain `char' as equivalent to `signed char' or `unsigned 14818 // char' depending on the current char signedness mode. 
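    // E.g. (illustrative): if the type tag requires 'signed char *' and the
    // argument has type 'char *', no mismatch is reported on targets where
    // plain 'char' is signed.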
14819 if (mismatch) 14820 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 14821 RequiredType->getPointeeType())) || 14822 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 14823 mismatch = false; 14824 } else 14825 if (IsPointerAttr) 14826 mismatch = !isLayoutCompatible(Context, 14827 ArgumentType->getPointeeType(), 14828 RequiredType->getPointeeType()); 14829 else 14830 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 14831 14832 if (mismatch) 14833 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 14834 << ArgumentType << ArgumentKind 14835 << TypeInfo.LayoutCompatible << RequiredType 14836 << ArgumentExpr->getSourceRange() 14837 << TypeTagExpr->getSourceRange(); 14838 } 14839 14840 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 14841 CharUnits Alignment) { 14842 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 14843 } 14844 14845 void Sema::DiagnoseMisalignedMembers() { 14846 for (MisalignedMember &m : MisalignedMembers) { 14847 const NamedDecl *ND = m.RD; 14848 if (ND->getName().empty()) { 14849 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 14850 ND = TD; 14851 } 14852 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 14853 << m.MD << ND << m.E->getSourceRange(); 14854 } 14855 MisalignedMembers.clear(); 14856 } 14857 14858 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 14859 E = E->IgnoreParens(); 14860 if (!T->isPointerType() && !T->isIntegerType()) 14861 return; 14862 if (isa<UnaryOperator>(E) && 14863 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 14864 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 14865 if (isa<MemberExpr>(Op)) { 14866 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 14867 if (MA != MisalignedMembers.end() && 14868 (T->isIntegerType() || 14869 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 14870 Context.getTypeAlignInChars( 14871 T->getPointeeType()) <= MA->Alignment)))) 14872 MisalignedMembers.erase(MA); 14873 } 14874 } 14875 } 14876 14877 void Sema::RefersToMemberWithReducedAlignment( 14878 Expr *E, 14879 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 14880 Action) { 14881 const auto *ME = dyn_cast<MemberExpr>(E); 14882 if (!ME) 14883 return; 14884 14885 // No need to check expressions with an __unaligned-qualified type. 14886 if (E->getType().getQualifiers().hasUnaligned()) 14887 return; 14888 14889 // For a chain of MemberExpr like "a.b.c.d" this list 14890 // will keep FieldDecl's like [d, c, b]. 14891 SmallVector<FieldDecl *, 4> ReverseMemberChain; 14892 const MemberExpr *TopME = nullptr; 14893 bool AnyIsPacked = false; 14894 do { 14895 QualType BaseType = ME->getBase()->getType(); 14896 if (BaseType->isDependentType()) 14897 return; 14898 if (ME->isArrow()) 14899 BaseType = BaseType->getPointeeType(); 14900 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 14901 if (RD->isInvalidDecl()) 14902 return; 14903 14904 ValueDecl *MD = ME->getMemberDecl(); 14905 auto *FD = dyn_cast<FieldDecl>(MD); 14906 // We do not care about non-data members. 
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not within the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize the offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // Even if the offset is suitably aligned, the effective alignment of
      // the complete object may still be lower than the alignment the
      // expression expects.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the alignment required by the field type, but some
    // packed attribute in that chain has reduced the alignment. Another
    // packed structure may increase it again, yet if we got here any such
    // increase has not been enough. So pointing at the first FieldDecl that
    // either is packed itself or whose enclosing RecordDecl is packed seems
    // reasonable.
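    // For instance (an illustrative sketch): given
    //
    //   struct __attribute__((packed)) S { char c; int i; };
    //
    // taking '&s.i' builds the chain [i]; 'i' is not packed itself, but its
    // parent 'S' is, so 'i' is the field the diagnostic points at.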
14971 FieldDecl *FD = nullptr; 14972 CharUnits Alignment; 14973 for (FieldDecl *FDI : ReverseMemberChain) { 14974 if (FDI->hasAttr<PackedAttr>() || 14975 FDI->getParent()->hasAttr<PackedAttr>()) { 14976 FD = FDI; 14977 Alignment = std::min( 14978 Context.getTypeAlignInChars(FD->getType()), 14979 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 14980 break; 14981 } 14982 } 14983 assert(FD && "We did not find a packed FieldDecl!"); 14984 Action(E, FD->getParent(), FD, Alignment); 14985 } 14986 } 14987 14988 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 14989 using namespace std::placeholders; 14990 14991 RefersToMemberWithReducedAlignment( 14992 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 14993 _2, _3, _4)); 14994 } 14995