//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file implements extra semantic analysis beyond what is enforced
//  by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSwitch.h" 79 #include "llvm/ADT/Triple.h" 80 #include "llvm/Support/AtomicOrdering.h" 81 #include "llvm/Support/Casting.h" 82 #include "llvm/Support/Compiler.h" 83 #include "llvm/Support/ConvertUTF.h" 84 #include "llvm/Support/ErrorHandling.h" 85 #include "llvm/Support/Format.h" 86 #include "llvm/Support/Locale.h" 87 #include "llvm/Support/MathExtras.h" 88 #include "llvm/Support/SaveAndRestore.h" 89 #include "llvm/Support/raw_ostream.h" 90 #include <algorithm> 91 #include <bitset> 92 #include <cassert> 93 #include <cstddef> 94 #include <cstdint> 95 #include <functional> 96 #include <limits> 97 #include <string> 98 #include <tuple> 99 #include <utility> 100 101 using namespace clang; 102 using namespace sema; 103 104 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 105 unsigned ByteNo) const { 106 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 107 Context.getTargetInfo()); 108 } 109 110 /// Checks that a call expression's argument count is the desired number. 111 /// This is useful when doing custom type-checking. Returns true on error. 112 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 113 unsigned argCount = call->getNumArgs(); 114 if (argCount == desiredArgCount) return false; 115 116 if (argCount < desiredArgCount) 117 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 118 << 0 /*function call*/ << desiredArgCount << argCount 119 << call->getSourceRange(); 120 121 // Highlight all the excess arguments. 122 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 123 call->getArg(argCount - 1)->getEndLoc()); 124 125 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 126 << 0 /*function call*/ << desiredArgCount << argCount 127 << call->getArg(1)->getSourceRange(); 128 } 129 130 /// Check that the first argument to __builtin_annotation is an integer 131 /// and the second argument is a non-wide string literal. 132 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 133 if (checkArgCount(S, TheCall, 2)) 134 return true; 135 136 // First argument should be an integer. 137 Expr *ValArg = TheCall->getArg(0); 138 QualType Ty = ValArg->getType(); 139 if (!Ty->isIntegerType()) { 140 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 141 << ValArg->getSourceRange(); 142 return true; 143 } 144 145 // Second argument should be a constant string. 
static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << MaxValue.toString(10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

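/// Check a call to one of the __builtin_*_overflow builtins: the first two
/// arguments must be integers and the third must be a pointer to a non-const
/// integer that receives the result.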
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits for the mul builtin
  // until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

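/// Check a call to __builtin_call_with_static_chain: the first argument must
/// be a call to a regular function (not a block, builtin, or pseudo-destructor
/// call), and the second argument, the static chain, must have pointer type.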
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

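/// Estimates a conservative lower bound for the number of bytes an
/// sprintf-style call will write for a given format string, so it can be
/// compared against the size of the destination object.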
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f and %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
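  // Only a field width spelled as a constant in the format string contributes
  // to the estimate; '*' and unspecified widths are treated as 0.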
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in
/// the source. Otherwise, infer the object size using __builtin_object_size.
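/// For example (illustrative): given 'char buf[4];', a call such as
/// 'sprintf(buf, "%5d", i)' is flagged, since the format string alone
/// guarantees at least 6 bytes (5 for the field plus the null terminator)
/// while the destination object is only 4 bytes.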
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Evaluate strlen of strcpy arguments, use as object size.

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  Optional<llvm::APSInt> UsedSize;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                       .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          IsChkVariant = true;
          ObjectIndex = 2;
        } else {
          IsChkVariant = false;
          ObjectIndex = 0;
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the
  // caller (usually using __builtin_object_size). Use that value to check
  // this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use
    // its (potentially) more strict checking mode. Otherwise, conservatively
    // assume type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  if (!UsedSize) {
    Expr::EvalResult Result;
    Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
    if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
  }

  if (UsedSize.getValue().ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.getValue().toString(/*Radix=*/10));
}

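/// Check that a Microsoft SEH intrinsic is being used inside the kind of SEH
/// scope it requires (described by NeededScopeFlags); otherwise emit DiagID.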
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

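/// OpenCL C v2.0, s6.13.17.6 - Check the ndrange sub-group query builtins
/// (e.g. get_kernel_sub_group_count_for_ndrange): they require the
/// cl_khr_subgroups extension and take an ndrange_t followed by a block whose
/// parameters are all 'local void*'.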
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size and
/// get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier does not match the read/write nature of the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

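/// Check a call to __builtin_launder: the argument must be a pointer to a
/// complete object type (not a function or void pointer); the result has the
/// same, possibly decayed, type as the argument.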
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

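/// Perform target-specific checking of a builtin call by dispatching on the
/// target architecture; architectures without specific checks accept the call.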
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1539 case Builtin::BI__builtin_prefetch: 1540 if (SemaBuiltinPrefetch(TheCall)) 1541 return ExprError(); 1542 break; 1543 case Builtin::BI__builtin_alloca_with_align: 1544 if (SemaBuiltinAllocaWithAlign(TheCall)) 1545 return ExprError(); 1546 LLVM_FALLTHROUGH; 1547 case Builtin::BI__builtin_alloca: 1548 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1549 << TheCall->getDirectCallee(); 1550 break; 1551 case Builtin::BI__assume: 1552 case Builtin::BI__builtin_assume: 1553 if (SemaBuiltinAssume(TheCall)) 1554 return ExprError(); 1555 break; 1556 case Builtin::BI__builtin_assume_aligned: 1557 if (SemaBuiltinAssumeAligned(TheCall)) 1558 return ExprError(); 1559 break; 1560 case Builtin::BI__builtin_dynamic_object_size: 1561 case Builtin::BI__builtin_object_size: 1562 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1563 return ExprError(); 1564 break; 1565 case Builtin::BI__builtin_longjmp: 1566 if (SemaBuiltinLongjmp(TheCall)) 1567 return ExprError(); 1568 break; 1569 case Builtin::BI__builtin_setjmp: 1570 if (SemaBuiltinSetjmp(TheCall)) 1571 return ExprError(); 1572 break; 1573 case Builtin::BI_setjmp: 1574 case Builtin::BI_setjmpex: 1575 if (checkArgCount(*this, TheCall, 1)) 1576 return true; 1577 break; 1578 case Builtin::BI__builtin_classify_type: 1579 if (checkArgCount(*this, TheCall, 1)) return true; 1580 TheCall->setType(Context.IntTy); 1581 break; 1582 case Builtin::BI__builtin_complex: 1583 if (SemaBuiltinComplex(TheCall)) 1584 return ExprError(); 1585 break; 1586 case Builtin::BI__builtin_constant_p: { 1587 if (checkArgCount(*this, TheCall, 1)) return true; 1588 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1589 if (Arg.isInvalid()) return true; 1590 TheCall->setArg(0, Arg.get()); 1591 TheCall->setType(Context.IntTy); 1592 break; 1593 } 1594 case Builtin::BI__builtin_launder: 1595 return SemaBuiltinLaunder(*this, TheCall); 1596 case Builtin::BI__sync_fetch_and_add: 1597 case Builtin::BI__sync_fetch_and_add_1: 1598 case Builtin::BI__sync_fetch_and_add_2: 1599 case Builtin::BI__sync_fetch_and_add_4: 1600 case Builtin::BI__sync_fetch_and_add_8: 1601 case Builtin::BI__sync_fetch_and_add_16: 1602 case Builtin::BI__sync_fetch_and_sub: 1603 case Builtin::BI__sync_fetch_and_sub_1: 1604 case Builtin::BI__sync_fetch_and_sub_2: 1605 case Builtin::BI__sync_fetch_and_sub_4: 1606 case Builtin::BI__sync_fetch_and_sub_8: 1607 case Builtin::BI__sync_fetch_and_sub_16: 1608 case Builtin::BI__sync_fetch_and_or: 1609 case Builtin::BI__sync_fetch_and_or_1: 1610 case Builtin::BI__sync_fetch_and_or_2: 1611 case Builtin::BI__sync_fetch_and_or_4: 1612 case Builtin::BI__sync_fetch_and_or_8: 1613 case Builtin::BI__sync_fetch_and_or_16: 1614 case Builtin::BI__sync_fetch_and_and: 1615 case Builtin::BI__sync_fetch_and_and_1: 1616 case Builtin::BI__sync_fetch_and_and_2: 1617 case Builtin::BI__sync_fetch_and_and_4: 1618 case Builtin::BI__sync_fetch_and_and_8: 1619 case Builtin::BI__sync_fetch_and_and_16: 1620 case Builtin::BI__sync_fetch_and_xor: 1621 case Builtin::BI__sync_fetch_and_xor_1: 1622 case Builtin::BI__sync_fetch_and_xor_2: 1623 case Builtin::BI__sync_fetch_and_xor_4: 1624 case Builtin::BI__sync_fetch_and_xor_8: 1625 case Builtin::BI__sync_fetch_and_xor_16: 1626 case Builtin::BI__sync_fetch_and_nand: 1627 case Builtin::BI__sync_fetch_and_nand_1: 1628 case Builtin::BI__sync_fetch_and_nand_2: 1629 case Builtin::BI__sync_fetch_and_nand_4: 1630 case Builtin::BI__sync_fetch_and_nand_8: 1631 case Builtin::BI__sync_fetch_and_nand_16: 1632 case Builtin::BI__sync_add_and_fetch: 1633 
case Builtin::BI__sync_add_and_fetch_1: 1634 case Builtin::BI__sync_add_and_fetch_2: 1635 case Builtin::BI__sync_add_and_fetch_4: 1636 case Builtin::BI__sync_add_and_fetch_8: 1637 case Builtin::BI__sync_add_and_fetch_16: 1638 case Builtin::BI__sync_sub_and_fetch: 1639 case Builtin::BI__sync_sub_and_fetch_1: 1640 case Builtin::BI__sync_sub_and_fetch_2: 1641 case Builtin::BI__sync_sub_and_fetch_4: 1642 case Builtin::BI__sync_sub_and_fetch_8: 1643 case Builtin::BI__sync_sub_and_fetch_16: 1644 case Builtin::BI__sync_and_and_fetch: 1645 case Builtin::BI__sync_and_and_fetch_1: 1646 case Builtin::BI__sync_and_and_fetch_2: 1647 case Builtin::BI__sync_and_and_fetch_4: 1648 case Builtin::BI__sync_and_and_fetch_8: 1649 case Builtin::BI__sync_and_and_fetch_16: 1650 case Builtin::BI__sync_or_and_fetch: 1651 case Builtin::BI__sync_or_and_fetch_1: 1652 case Builtin::BI__sync_or_and_fetch_2: 1653 case Builtin::BI__sync_or_and_fetch_4: 1654 case Builtin::BI__sync_or_and_fetch_8: 1655 case Builtin::BI__sync_or_and_fetch_16: 1656 case Builtin::BI__sync_xor_and_fetch: 1657 case Builtin::BI__sync_xor_and_fetch_1: 1658 case Builtin::BI__sync_xor_and_fetch_2: 1659 case Builtin::BI__sync_xor_and_fetch_4: 1660 case Builtin::BI__sync_xor_and_fetch_8: 1661 case Builtin::BI__sync_xor_and_fetch_16: 1662 case Builtin::BI__sync_nand_and_fetch: 1663 case Builtin::BI__sync_nand_and_fetch_1: 1664 case Builtin::BI__sync_nand_and_fetch_2: 1665 case Builtin::BI__sync_nand_and_fetch_4: 1666 case Builtin::BI__sync_nand_and_fetch_8: 1667 case Builtin::BI__sync_nand_and_fetch_16: 1668 case Builtin::BI__sync_val_compare_and_swap: 1669 case Builtin::BI__sync_val_compare_and_swap_1: 1670 case Builtin::BI__sync_val_compare_and_swap_2: 1671 case Builtin::BI__sync_val_compare_and_swap_4: 1672 case Builtin::BI__sync_val_compare_and_swap_8: 1673 case Builtin::BI__sync_val_compare_and_swap_16: 1674 case Builtin::BI__sync_bool_compare_and_swap: 1675 case Builtin::BI__sync_bool_compare_and_swap_1: 1676 case Builtin::BI__sync_bool_compare_and_swap_2: 1677 case Builtin::BI__sync_bool_compare_and_swap_4: 1678 case Builtin::BI__sync_bool_compare_and_swap_8: 1679 case Builtin::BI__sync_bool_compare_and_swap_16: 1680 case Builtin::BI__sync_lock_test_and_set: 1681 case Builtin::BI__sync_lock_test_and_set_1: 1682 case Builtin::BI__sync_lock_test_and_set_2: 1683 case Builtin::BI__sync_lock_test_and_set_4: 1684 case Builtin::BI__sync_lock_test_and_set_8: 1685 case Builtin::BI__sync_lock_test_and_set_16: 1686 case Builtin::BI__sync_lock_release: 1687 case Builtin::BI__sync_lock_release_1: 1688 case Builtin::BI__sync_lock_release_2: 1689 case Builtin::BI__sync_lock_release_4: 1690 case Builtin::BI__sync_lock_release_8: 1691 case Builtin::BI__sync_lock_release_16: 1692 case Builtin::BI__sync_swap: 1693 case Builtin::BI__sync_swap_1: 1694 case Builtin::BI__sync_swap_2: 1695 case Builtin::BI__sync_swap_4: 1696 case Builtin::BI__sync_swap_8: 1697 case Builtin::BI__sync_swap_16: 1698 return SemaBuiltinAtomicOverloaded(TheCallResult); 1699 case Builtin::BI__sync_synchronize: 1700 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1701 << TheCall->getCallee()->getSourceRange(); 1702 break; 1703 case Builtin::BI__builtin_nontemporal_load: 1704 case Builtin::BI__builtin_nontemporal_store: 1705 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1706 case Builtin::BI__builtin_memcpy_inline: { 1707 clang::Expr *SizeOp = TheCall->getArg(2); 1708 // We warn about copying to or from `nullptr` pointers when `size` is 1709 // greater than 0. 
When `size` is value dependent we cannot evaluate its 1710 // value so we bail out. 1711 if (SizeOp->isValueDependent()) 1712 break; 1713 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1714 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1715 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1716 } 1717 break; 1718 } 1719 #define BUILTIN(ID, TYPE, ATTRS) 1720 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1721 case Builtin::BI##ID: \ 1722 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1723 #include "clang/Basic/Builtins.def" 1724 case Builtin::BI__annotation: 1725 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1726 return ExprError(); 1727 break; 1728 case Builtin::BI__builtin_annotation: 1729 if (SemaBuiltinAnnotation(*this, TheCall)) 1730 return ExprError(); 1731 break; 1732 case Builtin::BI__builtin_addressof: 1733 if (SemaBuiltinAddressof(*this, TheCall)) 1734 return ExprError(); 1735 break; 1736 case Builtin::BI__builtin_is_aligned: 1737 case Builtin::BI__builtin_align_up: 1738 case Builtin::BI__builtin_align_down: 1739 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1740 return ExprError(); 1741 break; 1742 case Builtin::BI__builtin_add_overflow: 1743 case Builtin::BI__builtin_sub_overflow: 1744 case Builtin::BI__builtin_mul_overflow: 1745 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1746 return ExprError(); 1747 break; 1748 case Builtin::BI__builtin_operator_new: 1749 case Builtin::BI__builtin_operator_delete: { 1750 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1751 ExprResult Res = 1752 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1753 if (Res.isInvalid()) 1754 CorrectDelayedTyposInExpr(TheCallResult.get()); 1755 return Res; 1756 } 1757 case Builtin::BI__builtin_dump_struct: { 1758 // We first want to ensure we are called with 2 arguments 1759 if (checkArgCount(*this, TheCall, 2)) 1760 return ExprError(); 1761 // Ensure that the first argument is of type 'struct XX *' 1762 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1763 const QualType PtrArgType = PtrArg->getType(); 1764 if (!PtrArgType->isPointerType() || 1765 !PtrArgType->getPointeeType()->isRecordType()) { 1766 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1767 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1768 << "structure pointer"; 1769 return ExprError(); 1770 } 1771 1772 // Ensure that the second argument is of type 'FunctionType' 1773 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1774 const QualType FnPtrArgType = FnPtrArg->getType(); 1775 if (!FnPtrArgType->isPointerType()) { 1776 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1777 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1778 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1779 return ExprError(); 1780 } 1781 1782 const auto *FuncType = 1783 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1784 1785 if (!FuncType) { 1786 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1787 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1788 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1789 return ExprError(); 1790 } 1791 1792 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1793 if (!FT->getNumParams()) { 1794 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1795 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 
1796 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1797 return ExprError();
1798 }
1799 QualType PT = FT->getParamType(0);
1800 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
1801 !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
1802 !PT->getPointeeType().isConstQualified()) {
1803 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
1804 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
1805 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
1806 return ExprError();
1807 }
1808 }
1809
1810 TheCall->setType(Context.IntTy);
1811 break;
1812 }
1813 case Builtin::BI__builtin_expect_with_probability: {
1814 // We first want to ensure we are called with 3 arguments.
1815 if (checkArgCount(*this, TheCall, 3))
1816 return ExprError();
1817 // Then check that the probability is a constant float in the range [0.0, 1.0].
1818 const Expr *ProbArg = TheCall->getArg(2);
1819 SmallVector<PartialDiagnosticAt, 8> Notes;
1820 Expr::EvalResult Eval;
1821 Eval.Diag = &Notes;
1822 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen,
1823 Context)) ||
1824 !Eval.Val.isFloat()) {
1825 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
1826 << ProbArg->getSourceRange();
1827 for (const PartialDiagnosticAt &PDiag : Notes)
1828 Diag(PDiag.first, PDiag.second);
1829 return ExprError();
1830 }
1831 llvm::APFloat Probability = Eval.Val.getFloat();
1832 bool LoseInfo = false;
1833 Probability.convert(llvm::APFloat::IEEEdouble(),
1834 llvm::RoundingMode::Dynamic, &LoseInfo);
1835 if (!(Probability >= llvm::APFloat(0.0) &&
1836 Probability <= llvm::APFloat(1.0))) {
1837 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
1838 << ProbArg->getSourceRange();
1839 return ExprError();
1840 }
1841 break;
1842 }
1843 case Builtin::BI__builtin_preserve_access_index:
1844 if (SemaBuiltinPreserveAI(*this, TheCall))
1845 return ExprError();
1846 break;
1847 case Builtin::BI__builtin_call_with_static_chain:
1848 if (SemaBuiltinCallWithStaticChain(*this, TheCall))
1849 return ExprError();
1850 break;
1851 case Builtin::BI__exception_code:
1852 case Builtin::BI_exception_code:
1853 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
1854 diag::err_seh___except_block))
1855 return ExprError();
1856 break;
1857 case Builtin::BI__exception_info:
1858 case Builtin::BI_exception_info:
1859 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
1860 diag::err_seh___except_filter))
1861 return ExprError();
1862 break;
1863 case Builtin::BI__GetExceptionInfo:
1864 if (checkArgCount(*this, TheCall, 1))
1865 return ExprError();
1866
1867 if (CheckCXXThrowOperand(
1868 TheCall->getBeginLoc(),
1869 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
1870 TheCall))
1871 return ExprError();
1872
1873 TheCall->setType(Context.VoidPtrTy);
1874 break;
1875 // OpenCL v2.0, s6.13.16 - Pipe functions
1876 case Builtin::BIread_pipe:
1877 case Builtin::BIwrite_pipe:
1878 // Since those two functions are declared with varargs, we need a semantic
1879 // check for the arguments.
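// Editor's illustrative note (not part of the original source): e.g. in an
// OpenCL 2.0 kernel, a call such as
//   int ok = read_pipe(p, &elem);
// is checked here so that `elem` points to the pipe's packet type; a
// mismatched pointer argument is diagnosed by SemaBuiltinRWPipe.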
1880 if (SemaBuiltinRWPipe(*this, TheCall)) 1881 return ExprError(); 1882 break; 1883 case Builtin::BIreserve_read_pipe: 1884 case Builtin::BIreserve_write_pipe: 1885 case Builtin::BIwork_group_reserve_read_pipe: 1886 case Builtin::BIwork_group_reserve_write_pipe: 1887 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1888 return ExprError(); 1889 break; 1890 case Builtin::BIsub_group_reserve_read_pipe: 1891 case Builtin::BIsub_group_reserve_write_pipe: 1892 if (checkOpenCLSubgroupExt(*this, TheCall) || 1893 SemaBuiltinReserveRWPipe(*this, TheCall)) 1894 return ExprError(); 1895 break; 1896 case Builtin::BIcommit_read_pipe: 1897 case Builtin::BIcommit_write_pipe: 1898 case Builtin::BIwork_group_commit_read_pipe: 1899 case Builtin::BIwork_group_commit_write_pipe: 1900 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1901 return ExprError(); 1902 break; 1903 case Builtin::BIsub_group_commit_read_pipe: 1904 case Builtin::BIsub_group_commit_write_pipe: 1905 if (checkOpenCLSubgroupExt(*this, TheCall) || 1906 SemaBuiltinCommitRWPipe(*this, TheCall)) 1907 return ExprError(); 1908 break; 1909 case Builtin::BIget_pipe_num_packets: 1910 case Builtin::BIget_pipe_max_packets: 1911 if (SemaBuiltinPipePackets(*this, TheCall)) 1912 return ExprError(); 1913 break; 1914 case Builtin::BIto_global: 1915 case Builtin::BIto_local: 1916 case Builtin::BIto_private: 1917 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1918 return ExprError(); 1919 break; 1920 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 1921 case Builtin::BIenqueue_kernel: 1922 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1923 return ExprError(); 1924 break; 1925 case Builtin::BIget_kernel_work_group_size: 1926 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1927 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1928 return ExprError(); 1929 break; 1930 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1931 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1932 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1933 return ExprError(); 1934 break; 1935 case Builtin::BI__builtin_os_log_format: 1936 Cleanup.setExprNeedsCleanups(true); 1937 LLVM_FALLTHROUGH; 1938 case Builtin::BI__builtin_os_log_format_buffer_size: 1939 if (SemaBuiltinOSLogFormat(TheCall)) 1940 return ExprError(); 1941 break; 1942 case Builtin::BI__builtin_frame_address: 1943 case Builtin::BI__builtin_return_address: { 1944 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1945 return ExprError(); 1946 1947 // -Wframe-address warning if non-zero passed to builtin 1948 // return/frame address. 1949 Expr::EvalResult Result; 1950 if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1951 Result.Val.getInt() != 0) 1952 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1953 << ((BuiltinID == Builtin::BI__builtin_return_address) 1954 ? "__builtin_return_address" 1955 : "__builtin_frame_address") 1956 << TheCall->getSourceRange(); 1957 break; 1958 } 1959 1960 case Builtin::BI__builtin_matrix_transpose: 1961 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1962 1963 case Builtin::BI__builtin_matrix_column_major_load: 1964 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1965 1966 case Builtin::BI__builtin_matrix_column_major_store: 1967 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1968 } 1969 1970 // Since the target specific builtins for each arch overlap, only check those 1971 // of the arch we are compiling for. 
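// Editor's illustrative note (not part of the original source): e.g. when
// compiling device code for an offloading target (CUDA/OpenMP), a builtin
// that belongs to the host is translated back to the aux target's builtin ID
// and validated against the aux TargetInfo in the branch below.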
1972 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1973 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 1974 assert(Context.getAuxTargetInfo() && 1975 "Aux Target Builtin, but not an aux target?"); 1976 1977 if (CheckTSBuiltinFunctionCall( 1978 *Context.getAuxTargetInfo(), 1979 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 1980 return ExprError(); 1981 } else { 1982 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 1983 TheCall)) 1984 return ExprError(); 1985 } 1986 } 1987 1988 return TheCallResult; 1989 } 1990 1991 // Get the valid immediate range for the specified NEON type code. 1992 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 1993 NeonTypeFlags Type(t); 1994 int IsQuad = ForceQuad ? true : Type.isQuad(); 1995 switch (Type.getEltType()) { 1996 case NeonTypeFlags::Int8: 1997 case NeonTypeFlags::Poly8: 1998 return shift ? 7 : (8 << IsQuad) - 1; 1999 case NeonTypeFlags::Int16: 2000 case NeonTypeFlags::Poly16: 2001 return shift ? 15 : (4 << IsQuad) - 1; 2002 case NeonTypeFlags::Int32: 2003 return shift ? 31 : (2 << IsQuad) - 1; 2004 case NeonTypeFlags::Int64: 2005 case NeonTypeFlags::Poly64: 2006 return shift ? 63 : (1 << IsQuad) - 1; 2007 case NeonTypeFlags::Poly128: 2008 return shift ? 127 : (1 << IsQuad) - 1; 2009 case NeonTypeFlags::Float16: 2010 assert(!shift && "cannot shift float types!"); 2011 return (4 << IsQuad) - 1; 2012 case NeonTypeFlags::Float32: 2013 assert(!shift && "cannot shift float types!"); 2014 return (2 << IsQuad) - 1; 2015 case NeonTypeFlags::Float64: 2016 assert(!shift && "cannot shift float types!"); 2017 return (1 << IsQuad) - 1; 2018 case NeonTypeFlags::BFloat16: 2019 assert(!shift && "cannot shift float types!"); 2020 return (4 << IsQuad) - 1; 2021 } 2022 llvm_unreachable("Invalid NeonTypeFlag!"); 2023 } 2024 2025 /// getNeonEltType - Return the QualType corresponding to the elements of 2026 /// the vector type specified by the NeonTypeFlags. This is used to check 2027 /// the pointer arguments for Neon load/store intrinsics. 2028 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2029 bool IsPolyUnsigned, bool IsInt64Long) { 2030 switch (Flags.getEltType()) { 2031 case NeonTypeFlags::Int8: 2032 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2033 case NeonTypeFlags::Int16: 2034 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2035 case NeonTypeFlags::Int32: 2036 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2037 case NeonTypeFlags::Int64: 2038 if (IsInt64Long) 2039 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2040 else 2041 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2042 : Context.LongLongTy; 2043 case NeonTypeFlags::Poly8: 2044 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2045 case NeonTypeFlags::Poly16: 2046 return IsPolyUnsigned ? 
Context.UnsignedShortTy : Context.ShortTy; 2047 case NeonTypeFlags::Poly64: 2048 if (IsInt64Long) 2049 return Context.UnsignedLongTy; 2050 else 2051 return Context.UnsignedLongLongTy; 2052 case NeonTypeFlags::Poly128: 2053 break; 2054 case NeonTypeFlags::Float16: 2055 return Context.HalfTy; 2056 case NeonTypeFlags::Float32: 2057 return Context.FloatTy; 2058 case NeonTypeFlags::Float64: 2059 return Context.DoubleTy; 2060 case NeonTypeFlags::BFloat16: 2061 return Context.BFloat16Ty; 2062 } 2063 llvm_unreachable("Invalid NeonTypeFlag!"); 2064 } 2065 2066 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2067 // Range check SVE intrinsics that take immediate values. 2068 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2069 2070 switch (BuiltinID) { 2071 default: 2072 return false; 2073 #define GET_SVE_IMMEDIATE_CHECK 2074 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2075 #undef GET_SVE_IMMEDIATE_CHECK 2076 } 2077 2078 // Perform all the immediate checks for this builtin call. 2079 bool HasError = false; 2080 for (auto &I : ImmChecks) { 2081 int ArgNum, CheckTy, ElementSizeInBits; 2082 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2083 2084 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2085 2086 // Function that checks whether the operand (ArgNum) is an immediate 2087 // that is one of the predefined values. 2088 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2089 int ErrDiag) -> bool { 2090 // We can't check the value of a dependent argument. 2091 Expr *Arg = TheCall->getArg(ArgNum); 2092 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2093 return false; 2094 2095 // Check constant-ness first. 2096 llvm::APSInt Imm; 2097 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2098 return true; 2099 2100 if (!CheckImm(Imm.getSExtValue())) 2101 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2102 return false; 2103 }; 2104 2105 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2106 case SVETypeFlags::ImmCheck0_31: 2107 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2108 HasError = true; 2109 break; 2110 case SVETypeFlags::ImmCheck0_13: 2111 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2112 HasError = true; 2113 break; 2114 case SVETypeFlags::ImmCheck1_16: 2115 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2116 HasError = true; 2117 break; 2118 case SVETypeFlags::ImmCheck0_7: 2119 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2120 HasError = true; 2121 break; 2122 case SVETypeFlags::ImmCheckExtract: 2123 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2124 (2048 / ElementSizeInBits) - 1)) 2125 HasError = true; 2126 break; 2127 case SVETypeFlags::ImmCheckShiftRight: 2128 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2129 HasError = true; 2130 break; 2131 case SVETypeFlags::ImmCheckShiftRightNarrow: 2132 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2133 ElementSizeInBits / 2)) 2134 HasError = true; 2135 break; 2136 case SVETypeFlags::ImmCheckShiftLeft: 2137 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2138 ElementSizeInBits - 1)) 2139 HasError = true; 2140 break; 2141 case SVETypeFlags::ImmCheckLaneIndex: 2142 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2143 (128 / (1 * ElementSizeInBits)) - 1)) 2144 HasError = true; 2145 break; 2146 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2147 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2148 (128 / (2 * ElementSizeInBits)) - 1)) 2149 HasError = true; 2150 break; 2151 case 
SVETypeFlags::ImmCheckLaneIndexDot: 2152 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2153 (128 / (4 * ElementSizeInBits)) - 1)) 2154 HasError = true; 2155 break; 2156 case SVETypeFlags::ImmCheckComplexRot90_270: 2157 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2158 diag::err_rotation_argument_to_cadd)) 2159 HasError = true; 2160 break; 2161 case SVETypeFlags::ImmCheckComplexRotAll90: 2162 if (CheckImmediateInSet( 2163 [](int64_t V) { 2164 return V == 0 || V == 90 || V == 180 || V == 270; 2165 }, 2166 diag::err_rotation_argument_to_cmla)) 2167 HasError = true; 2168 break; 2169 case SVETypeFlags::ImmCheck0_1: 2170 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2171 HasError = true; 2172 break; 2173 case SVETypeFlags::ImmCheck0_2: 2174 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2175 HasError = true; 2176 break; 2177 case SVETypeFlags::ImmCheck0_3: 2178 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2179 HasError = true; 2180 break; 2181 } 2182 } 2183 2184 return HasError; 2185 } 2186 2187 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2188 unsigned BuiltinID, CallExpr *TheCall) { 2189 llvm::APSInt Result; 2190 uint64_t mask = 0; 2191 unsigned TV = 0; 2192 int PtrArgNum = -1; 2193 bool HasConstPtr = false; 2194 switch (BuiltinID) { 2195 #define GET_NEON_OVERLOAD_CHECK 2196 #include "clang/Basic/arm_neon.inc" 2197 #include "clang/Basic/arm_fp16.inc" 2198 #undef GET_NEON_OVERLOAD_CHECK 2199 } 2200 2201 // For NEON intrinsics which are overloaded on vector element type, validate 2202 // the immediate which specifies which variant to emit. 2203 unsigned ImmArg = TheCall->getNumArgs()-1; 2204 if (mask) { 2205 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2206 return true; 2207 2208 TV = Result.getLimitedValue(64); 2209 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2210 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2211 << TheCall->getArg(ImmArg)->getSourceRange(); 2212 } 2213 2214 if (PtrArgNum >= 0) { 2215 // Check that pointer arguments have the specified type. 2216 Expr *Arg = TheCall->getArg(PtrArgNum); 2217 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2218 Arg = ICE->getSubExpr(); 2219 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2220 QualType RHSTy = RHS.get()->getType(); 2221 2222 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2223 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2224 Arch == llvm::Triple::aarch64_32 || 2225 Arch == llvm::Triple::aarch64_be; 2226 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2227 QualType EltTy = 2228 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2229 if (HasConstPtr) 2230 EltTy = EltTy.withConst(); 2231 QualType LHSTy = Context.getPointerType(EltTy); 2232 AssignConvertType ConvTy; 2233 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2234 if (RHS.isInvalid()) 2235 return true; 2236 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2237 RHS.get(), AA_Assigning)) 2238 return true; 2239 } 2240 2241 // For NEON intrinsics which take an immediate value as part of the 2242 // instruction, range check them here. 
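// Editor's illustrative note (not part of the original source): for example,
// vshr_n_s32(a, n) requires n to be an integer constant in [1, 32]; the
// per-builtin bounds are generated into arm_neon.inc / arm_fp16.inc and
// enforced through SemaBuiltinConstantArgRange below.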
2243 unsigned i = 0, l = 0, u = 0; 2244 switch (BuiltinID) { 2245 default: 2246 return false; 2247 #define GET_NEON_IMMEDIATE_CHECK 2248 #include "clang/Basic/arm_neon.inc" 2249 #include "clang/Basic/arm_fp16.inc" 2250 #undef GET_NEON_IMMEDIATE_CHECK 2251 } 2252 2253 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2254 } 2255 2256 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2257 switch (BuiltinID) { 2258 default: 2259 return false; 2260 #include "clang/Basic/arm_mve_builtin_sema.inc" 2261 } 2262 } 2263 2264 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2265 CallExpr *TheCall) { 2266 bool Err = false; 2267 switch (BuiltinID) { 2268 default: 2269 return false; 2270 #include "clang/Basic/arm_cde_builtin_sema.inc" 2271 } 2272 2273 if (Err) 2274 return true; 2275 2276 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2277 } 2278 2279 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2280 const Expr *CoprocArg, bool WantCDE) { 2281 if (isConstantEvaluated()) 2282 return false; 2283 2284 // We can't check the value of a dependent argument. 2285 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2286 return false; 2287 2288 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2289 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2290 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2291 2292 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2293 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2294 2295 if (IsCDECoproc != WantCDE) 2296 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2297 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2298 2299 return false; 2300 } 2301 2302 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2303 unsigned MaxWidth) { 2304 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2305 BuiltinID == ARM::BI__builtin_arm_ldaex || 2306 BuiltinID == ARM::BI__builtin_arm_strex || 2307 BuiltinID == ARM::BI__builtin_arm_stlex || 2308 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2309 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2310 BuiltinID == AArch64::BI__builtin_arm_strex || 2311 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2312 "unexpected ARM builtin"); 2313 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2314 BuiltinID == ARM::BI__builtin_arm_ldaex || 2315 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2316 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2317 2318 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2319 2320 // Ensure that we have the proper number of arguments. 2321 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2322 return true; 2323 2324 // Inspect the pointer argument of the atomic builtin. This should always be 2325 // a pointer type, whose element is an integral scalar or pointer type. 2326 // Because it is a pointer type, we don't have to worry about any implicit 2327 // casts here. 2328 Expr *PointerArg = TheCall->getArg(IsLdrex ? 
0 : 1); 2329 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2330 if (PointerArgRes.isInvalid()) 2331 return true; 2332 PointerArg = PointerArgRes.get(); 2333 2334 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2335 if (!pointerType) { 2336 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2337 << PointerArg->getType() << PointerArg->getSourceRange(); 2338 return true; 2339 } 2340 2341 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2342 // task is to insert the appropriate casts into the AST. First work out just 2343 // what the appropriate type is. 2344 QualType ValType = pointerType->getPointeeType(); 2345 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2346 if (IsLdrex) 2347 AddrType.addConst(); 2348 2349 // Issue a warning if the cast is dodgy. 2350 CastKind CastNeeded = CK_NoOp; 2351 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2352 CastNeeded = CK_BitCast; 2353 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2354 << PointerArg->getType() << Context.getPointerType(AddrType) 2355 << AA_Passing << PointerArg->getSourceRange(); 2356 } 2357 2358 // Finally, do the cast and replace the argument with the corrected version. 2359 AddrType = Context.getPointerType(AddrType); 2360 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2361 if (PointerArgRes.isInvalid()) 2362 return true; 2363 PointerArg = PointerArgRes.get(); 2364 2365 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 2366 2367 // In general, we allow ints, floats and pointers to be loaded and stored. 2368 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2369 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2370 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2371 << PointerArg->getType() << PointerArg->getSourceRange(); 2372 return true; 2373 } 2374 2375 // But ARM doesn't have instructions to deal with 128-bit versions. 2376 if (Context.getTypeSize(ValType) > MaxWidth) { 2377 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2378 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2379 << PointerArg->getType() << PointerArg->getSourceRange(); 2380 return true; 2381 } 2382 2383 switch (ValType.getObjCLifetime()) { 2384 case Qualifiers::OCL_None: 2385 case Qualifiers::OCL_ExplicitNone: 2386 // okay 2387 break; 2388 2389 case Qualifiers::OCL_Weak: 2390 case Qualifiers::OCL_Strong: 2391 case Qualifiers::OCL_Autoreleasing: 2392 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2393 << ValType << PointerArg->getSourceRange(); 2394 return true; 2395 } 2396 2397 if (IsLdrex) { 2398 TheCall->setType(ValType); 2399 return false; 2400 } 2401 2402 // Initialize the argument to be stored. 2403 ExprResult ValArg = TheCall->getArg(0); 2404 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2405 Context, ValType, /*consume*/ false); 2406 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2407 if (ValArg.isInvalid()) 2408 return true; 2409 TheCall->setArg(0, ValArg.get()); 2410 2411 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2412 // but the custom checker bypasses all default analysis. 
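// Editor's illustrative note (not part of the original source): e.g.
//   int failed = __builtin_arm_strex(val, ptr);
// yields 0 when the exclusive store succeeds and nonzero when it is lost,
// which is why the call's type is forced to int here.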
2413 TheCall->setType(Context.IntTy); 2414 return false; 2415 } 2416 2417 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2418 CallExpr *TheCall) { 2419 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2420 BuiltinID == ARM::BI__builtin_arm_ldaex || 2421 BuiltinID == ARM::BI__builtin_arm_strex || 2422 BuiltinID == ARM::BI__builtin_arm_stlex) { 2423 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2424 } 2425 2426 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2427 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2428 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2429 } 2430 2431 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2432 BuiltinID == ARM::BI__builtin_arm_wsr64) 2433 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2434 2435 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2436 BuiltinID == ARM::BI__builtin_arm_rsrp || 2437 BuiltinID == ARM::BI__builtin_arm_wsr || 2438 BuiltinID == ARM::BI__builtin_arm_wsrp) 2439 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2440 2441 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2442 return true; 2443 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2444 return true; 2445 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2446 return true; 2447 2448 // For intrinsics which take an immediate value as part of the instruction, 2449 // range check them here. 2450 // FIXME: VFP Intrinsics should error if VFP not present. 2451 switch (BuiltinID) { 2452 default: return false; 2453 case ARM::BI__builtin_arm_ssat: 2454 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2455 case ARM::BI__builtin_arm_usat: 2456 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2457 case ARM::BI__builtin_arm_ssat16: 2458 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2459 case ARM::BI__builtin_arm_usat16: 2460 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2461 case ARM::BI__builtin_arm_vcvtr_f: 2462 case ARM::BI__builtin_arm_vcvtr_d: 2463 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2464 case ARM::BI__builtin_arm_dmb: 2465 case ARM::BI__builtin_arm_dsb: 2466 case ARM::BI__builtin_arm_isb: 2467 case ARM::BI__builtin_arm_dbg: 2468 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2469 case ARM::BI__builtin_arm_cdp: 2470 case ARM::BI__builtin_arm_cdp2: 2471 case ARM::BI__builtin_arm_mcr: 2472 case ARM::BI__builtin_arm_mcr2: 2473 case ARM::BI__builtin_arm_mrc: 2474 case ARM::BI__builtin_arm_mrc2: 2475 case ARM::BI__builtin_arm_mcrr: 2476 case ARM::BI__builtin_arm_mcrr2: 2477 case ARM::BI__builtin_arm_mrrc: 2478 case ARM::BI__builtin_arm_mrrc2: 2479 case ARM::BI__builtin_arm_ldc: 2480 case ARM::BI__builtin_arm_ldcl: 2481 case ARM::BI__builtin_arm_ldc2: 2482 case ARM::BI__builtin_arm_ldc2l: 2483 case ARM::BI__builtin_arm_stc: 2484 case ARM::BI__builtin_arm_stcl: 2485 case ARM::BI__builtin_arm_stc2: 2486 case ARM::BI__builtin_arm_stc2l: 2487 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2488 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2489 /*WantCDE*/ false); 2490 } 2491 } 2492 2493 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2494 unsigned BuiltinID, 2495 CallExpr *TheCall) { 2496 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2497 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2498 BuiltinID == AArch64::BI__builtin_arm_strex || 2499 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2500 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2501 } 2502 2503 if (BuiltinID == 
AArch64::BI__builtin_arm_prefetch) { 2504 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2505 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2506 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2507 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2508 } 2509 2510 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2511 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2512 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2513 2514 // Memory Tagging Extensions (MTE) Intrinsics 2515 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2516 BuiltinID == AArch64::BI__builtin_arm_addg || 2517 BuiltinID == AArch64::BI__builtin_arm_gmi || 2518 BuiltinID == AArch64::BI__builtin_arm_ldg || 2519 BuiltinID == AArch64::BI__builtin_arm_stg || 2520 BuiltinID == AArch64::BI__builtin_arm_subp) { 2521 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2522 } 2523 2524 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2525 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2526 BuiltinID == AArch64::BI__builtin_arm_wsr || 2527 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2528 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2529 2530 // Only check the valid encoding range. Any constant in this range would be 2531 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2532 // an exception for incorrect registers. This matches MSVC behavior. 2533 if (BuiltinID == AArch64::BI_ReadStatusReg || 2534 BuiltinID == AArch64::BI_WriteStatusReg) 2535 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2536 2537 if (BuiltinID == AArch64::BI__getReg) 2538 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2539 2540 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2541 return true; 2542 2543 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2544 return true; 2545 2546 // For intrinsics which take an immediate value as part of the instruction, 2547 // range check them here. 2548 unsigned i = 0, l = 0, u = 0; 2549 switch (BuiltinID) { 2550 default: return false; 2551 case AArch64::BI__builtin_arm_dmb: 2552 case AArch64::BI__builtin_arm_dsb: 2553 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2554 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2555 } 2556 2557 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2558 } 2559 2560 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2561 if (Arg->getType()->getAsPlaceholderType()) 2562 return false; 2563 2564 // The first argument needs to be a record field access. 2565 // If it is an array element access, we delay decision 2566 // to BPF backend to check whether the access is a 2567 // field access or not. 2568 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2569 dyn_cast<MemberExpr>(Arg->IgnoreParens()) || 2570 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens())); 2571 } 2572 2573 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2574 QualType ArgType = Arg->getType(); 2575 if (ArgType->getAsPlaceholderType()) 2576 return false; 2577 2578 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2579 // format: 2580 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2581 // 2. <type> var; 2582 // __builtin_preserve_type_info(var, flag); 2583 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) && 2584 !dyn_cast<UnaryOperator>(Arg->IgnoreParens())) 2585 return false; 2586 2587 // Typedef type. 2588 if (ArgType->getAs<TypedefType>()) 2589 return true; 2590 2591 // Record type or Enum type. 
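// Editor's illustrative note (not part of the original source): e.g.
//   struct map_value v; ... __builtin_preserve_type_info(v, flag);
// is accepted because `struct map_value` (a hypothetical name) is a named
// type, while an unnamed struct or enum falls through and is rejected below.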
2592 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2593 if (const auto *RT = Ty->getAs<RecordType>()) { 2594 if (!RT->getDecl()->getDeclName().isEmpty()) 2595 return true; 2596 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2597 if (!ET->getDecl()->getDeclName().isEmpty()) 2598 return true; 2599 } 2600 2601 return false; 2602 } 2603 2604 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2605 QualType ArgType = Arg->getType(); 2606 if (ArgType->getAsPlaceholderType()) 2607 return false; 2608 2609 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2610 // format: 2611 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2612 // flag); 2613 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2614 if (!UO) 2615 return false; 2616 2617 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2618 if (!CE || CE->getCastKind() != CK_IntegralToPointer) 2619 return false; 2620 2621 // The integer must be from an EnumConstantDecl. 2622 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2623 if (!DR) 2624 return false; 2625 2626 const EnumConstantDecl *Enumerator = 2627 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2628 if (!Enumerator) 2629 return false; 2630 2631 // The type must be EnumType. 2632 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2633 const auto *ET = Ty->getAs<EnumType>(); 2634 if (!ET) 2635 return false; 2636 2637 // The enum value must be supported. 2638 for (auto *EDI : ET->getDecl()->enumerators()) { 2639 if (EDI == Enumerator) 2640 return true; 2641 } 2642 2643 return false; 2644 } 2645 2646 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2647 CallExpr *TheCall) { 2648 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2649 BuiltinID == BPF::BI__builtin_btf_type_id || 2650 BuiltinID == BPF::BI__builtin_preserve_type_info || 2651 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2652 "unexpected BPF builtin"); 2653 2654 if (checkArgCount(*this, TheCall, 2)) 2655 return true; 2656 2657 // The second argument needs to be a constant int 2658 Expr *Arg = TheCall->getArg(1); 2659 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2660 diag::kind kind; 2661 if (!Value) { 2662 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2663 kind = diag::err_preserve_field_info_not_const; 2664 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2665 kind = diag::err_btf_type_id_not_const; 2666 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2667 kind = diag::err_preserve_type_info_not_const; 2668 else 2669 kind = diag::err_preserve_enum_value_not_const; 2670 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2671 return true; 2672 } 2673 2674 // The first argument 2675 Arg = TheCall->getArg(0); 2676 bool InvalidArg = false; 2677 bool ReturnUnsignedInt = true; 2678 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2679 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2680 InvalidArg = true; 2681 kind = diag::err_preserve_field_info_not_field; 2682 } 2683 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2684 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2685 InvalidArg = true; 2686 kind = diag::err_preserve_type_info_invalid; 2687 } 2688 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2689 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2690 InvalidArg = true; 2691 kind = diag::err_preserve_enum_value_invalid; 2692 } 2693 ReturnUnsignedInt = false; 2694 } 2695 2696 if (InvalidArg) { 2697 Diag(Arg->getBeginLoc(), kind) << 1 << 
Arg->getSourceRange(); 2698 return true; 2699 } 2700 2701 if (ReturnUnsignedInt) 2702 TheCall->setType(Context.UnsignedIntTy); 2703 else 2704 TheCall->setType(Context.UnsignedLongTy); 2705 return false; 2706 } 2707 2708 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2709 struct ArgInfo { 2710 uint8_t OpNum; 2711 bool IsSigned; 2712 uint8_t BitWidth; 2713 uint8_t Align; 2714 }; 2715 struct BuiltinInfo { 2716 unsigned BuiltinID; 2717 ArgInfo Infos[2]; 2718 }; 2719 2720 static BuiltinInfo Infos[] = { 2721 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2722 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2723 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2724 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2725 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2726 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2727 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2728 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2729 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2730 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2731 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2732 2733 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2734 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2735 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2736 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2737 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2738 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2739 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2740 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2741 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2742 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2743 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2744 2745 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2746 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2747 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2748 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2749 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2750 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2751 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2752 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2753 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2754 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2755 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2756 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2757 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2758 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2759 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2760 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2761 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2762 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2763 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2764 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2765 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 
8, 0 }} }, 2766 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2769 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2772 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2777 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2778 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2797 {{ 1, false, 6, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2805 {{ 1, false, 5, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2812 { 2, false, 5, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2814 { 2, false, 6, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2816 { 3, false, 5, 0 }} }, 
2817 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2818 { 3, false, 6, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2835 {{ 2, false, 4, 0 }, 2836 { 3, false, 5, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2838 {{ 2, false, 4, 0 }, 2839 { 3, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2841 {{ 2, false, 4, 0 }, 2842 { 3, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2844 {{ 2, false, 4, 0 }, 2845 { 3, false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2847 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2849 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2851 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2852 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2857 { 2, false, 5, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2859 { 2, false, 6, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2868 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2869 {{ 1, false, 4, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2871 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2872 {{ 1, false, 4, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2874 { 
Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2875 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2878 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2890 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2893 {{ 3, false, 1, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2896 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2898 {{ 3, false, 1, 0 }} }, 2899 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2903 {{ 3, false, 1, 0 }} }, 2904 }; 2905 2906 // Use a dynamically initialized static to sort the table exactly once on 2907 // first run. 2908 static const bool SortOnce = 2909 (llvm::sort(Infos, 2910 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2911 return LHS.BuiltinID < RHS.BuiltinID; 2912 }), 2913 true); 2914 (void)SortOnce; 2915 2916 const BuiltinInfo *F = llvm::partition_point( 2917 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2918 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2919 return false; 2920 2921 bool Error = false; 2922 2923 for (const ArgInfo &A : F->Infos) { 2924 // Ignore empty ArgInfo elements. 2925 if (A.BitWidth == 0) 2926 continue; 2927 2928 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2929 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1;
2930 if (!A.Align) {
2931 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
2932 } else {
2933 unsigned M = 1 << A.Align;
2934 Min *= M;
2935 Max *= M;
2936 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
2937 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
2938 }
2939 }
2940 return Error;
2941 }
2942
2943 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
2944 CallExpr *TheCall) {
2945 return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
2946 }
2947
2948 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
2949 unsigned BuiltinID, CallExpr *TheCall) {
2950 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
2951 CheckMipsBuiltinArgument(BuiltinID, TheCall);
2952 }
2953
2954 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
2955 CallExpr *TheCall) {
2956
2957 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
2958 BuiltinID <= Mips::BI__builtin_mips_lwx) {
2959 if (!TI.hasFeature("dsp"))
2960 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
2961 }
2962
2963 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
2964 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
2965 if (!TI.hasFeature("dspr2"))
2966 return Diag(TheCall->getBeginLoc(),
2967 diag::err_mips_builtin_requires_dspr2);
2968 }
2969
2970 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
2971 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
2972 if (!TI.hasFeature("msa"))
2973 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
2974 }
2975
2976 return false;
2977 }
2978
2979 // CheckMipsBuiltinArgument - Checks that the constant value passed to the
2980 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The
2981 // ordering for DSP is unspecified. MSA is ordered by the data format used
2982 // by the underlying instruction i.e., df/m, df/n and then by size.
2983 //
2984 // FIXME: The size tests here should instead be tablegen'd along with the
2985 // definitions from include/clang/Basic/BuiltinsMips.def.
2986 // FIXME: GCC is strict on signedness for some of these intrinsics; we should
2987 // be too.
2988 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
2989 unsigned i = 0, l = 0, u = 0, m = 0;
2990 switch (BuiltinID) {
2991 default: return false;
2992 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
2993 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
2994 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
2995 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
2996 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
2997 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
2998 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
2999 // MSA intrinsics. Instructions (which the intrinsics map to) that use the
3000 // df/m field.
3001 // These intrinsics take an unsigned 3 bit immediate.
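// Illustrative (hypothetical) example of what the df/m check rejects: the
// shift amount of a byte-element shift must fit in the 3-bit field, so
//   v16i8 r = __builtin_msa_slli_b(v, 9);   // rejected: 9 is outside [0, 7]
// is diagnosed by SemaBuiltinConstantArgRange, while shifts of 0..7 pass.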
3002 case Mips::BI__builtin_msa_bclri_b: 3003 case Mips::BI__builtin_msa_bnegi_b: 3004 case Mips::BI__builtin_msa_bseti_b: 3005 case Mips::BI__builtin_msa_sat_s_b: 3006 case Mips::BI__builtin_msa_sat_u_b: 3007 case Mips::BI__builtin_msa_slli_b: 3008 case Mips::BI__builtin_msa_srai_b: 3009 case Mips::BI__builtin_msa_srari_b: 3010 case Mips::BI__builtin_msa_srli_b: 3011 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3012 case Mips::BI__builtin_msa_binsli_b: 3013 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3014 // These intrinsics take an unsigned 4 bit immediate. 3015 case Mips::BI__builtin_msa_bclri_h: 3016 case Mips::BI__builtin_msa_bnegi_h: 3017 case Mips::BI__builtin_msa_bseti_h: 3018 case Mips::BI__builtin_msa_sat_s_h: 3019 case Mips::BI__builtin_msa_sat_u_h: 3020 case Mips::BI__builtin_msa_slli_h: 3021 case Mips::BI__builtin_msa_srai_h: 3022 case Mips::BI__builtin_msa_srari_h: 3023 case Mips::BI__builtin_msa_srli_h: 3024 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3025 case Mips::BI__builtin_msa_binsli_h: 3026 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3027 // These intrinsics take an unsigned 5 bit immediate. 3028 // The first block of intrinsics actually have an unsigned 5 bit field, 3029 // not a df/n field. 3030 case Mips::BI__builtin_msa_cfcmsa: 3031 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3032 case Mips::BI__builtin_msa_clei_u_b: 3033 case Mips::BI__builtin_msa_clei_u_h: 3034 case Mips::BI__builtin_msa_clei_u_w: 3035 case Mips::BI__builtin_msa_clei_u_d: 3036 case Mips::BI__builtin_msa_clti_u_b: 3037 case Mips::BI__builtin_msa_clti_u_h: 3038 case Mips::BI__builtin_msa_clti_u_w: 3039 case Mips::BI__builtin_msa_clti_u_d: 3040 case Mips::BI__builtin_msa_maxi_u_b: 3041 case Mips::BI__builtin_msa_maxi_u_h: 3042 case Mips::BI__builtin_msa_maxi_u_w: 3043 case Mips::BI__builtin_msa_maxi_u_d: 3044 case Mips::BI__builtin_msa_mini_u_b: 3045 case Mips::BI__builtin_msa_mini_u_h: 3046 case Mips::BI__builtin_msa_mini_u_w: 3047 case Mips::BI__builtin_msa_mini_u_d: 3048 case Mips::BI__builtin_msa_addvi_b: 3049 case Mips::BI__builtin_msa_addvi_h: 3050 case Mips::BI__builtin_msa_addvi_w: 3051 case Mips::BI__builtin_msa_addvi_d: 3052 case Mips::BI__builtin_msa_bclri_w: 3053 case Mips::BI__builtin_msa_bnegi_w: 3054 case Mips::BI__builtin_msa_bseti_w: 3055 case Mips::BI__builtin_msa_sat_s_w: 3056 case Mips::BI__builtin_msa_sat_u_w: 3057 case Mips::BI__builtin_msa_slli_w: 3058 case Mips::BI__builtin_msa_srai_w: 3059 case Mips::BI__builtin_msa_srari_w: 3060 case Mips::BI__builtin_msa_srli_w: 3061 case Mips::BI__builtin_msa_srlri_w: 3062 case Mips::BI__builtin_msa_subvi_b: 3063 case Mips::BI__builtin_msa_subvi_h: 3064 case Mips::BI__builtin_msa_subvi_w: 3065 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3066 case Mips::BI__builtin_msa_binsli_w: 3067 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3068 // These intrinsics take an unsigned 6 bit immediate. 
3069 case Mips::BI__builtin_msa_bclri_d: 3070 case Mips::BI__builtin_msa_bnegi_d: 3071 case Mips::BI__builtin_msa_bseti_d: 3072 case Mips::BI__builtin_msa_sat_s_d: 3073 case Mips::BI__builtin_msa_sat_u_d: 3074 case Mips::BI__builtin_msa_slli_d: 3075 case Mips::BI__builtin_msa_srai_d: 3076 case Mips::BI__builtin_msa_srari_d: 3077 case Mips::BI__builtin_msa_srli_d: 3078 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3079 case Mips::BI__builtin_msa_binsli_d: 3080 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3081 // These intrinsics take a signed 5 bit immediate. 3082 case Mips::BI__builtin_msa_ceqi_b: 3083 case Mips::BI__builtin_msa_ceqi_h: 3084 case Mips::BI__builtin_msa_ceqi_w: 3085 case Mips::BI__builtin_msa_ceqi_d: 3086 case Mips::BI__builtin_msa_clti_s_b: 3087 case Mips::BI__builtin_msa_clti_s_h: 3088 case Mips::BI__builtin_msa_clti_s_w: 3089 case Mips::BI__builtin_msa_clti_s_d: 3090 case Mips::BI__builtin_msa_clei_s_b: 3091 case Mips::BI__builtin_msa_clei_s_h: 3092 case Mips::BI__builtin_msa_clei_s_w: 3093 case Mips::BI__builtin_msa_clei_s_d: 3094 case Mips::BI__builtin_msa_maxi_s_b: 3095 case Mips::BI__builtin_msa_maxi_s_h: 3096 case Mips::BI__builtin_msa_maxi_s_w: 3097 case Mips::BI__builtin_msa_maxi_s_d: 3098 case Mips::BI__builtin_msa_mini_s_b: 3099 case Mips::BI__builtin_msa_mini_s_h: 3100 case Mips::BI__builtin_msa_mini_s_w: 3101 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3102 // These intrinsics take an unsigned 8 bit immediate. 3103 case Mips::BI__builtin_msa_andi_b: 3104 case Mips::BI__builtin_msa_nori_b: 3105 case Mips::BI__builtin_msa_ori_b: 3106 case Mips::BI__builtin_msa_shf_b: 3107 case Mips::BI__builtin_msa_shf_h: 3108 case Mips::BI__builtin_msa_shf_w: 3109 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3110 case Mips::BI__builtin_msa_bseli_b: 3111 case Mips::BI__builtin_msa_bmnzi_b: 3112 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3113 // df/n format 3114 // These intrinsics take an unsigned 4 bit immediate. 3115 case Mips::BI__builtin_msa_copy_s_b: 3116 case Mips::BI__builtin_msa_copy_u_b: 3117 case Mips::BI__builtin_msa_insve_b: 3118 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3119 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3120 // These intrinsics take an unsigned 3 bit immediate. 3121 case Mips::BI__builtin_msa_copy_s_h: 3122 case Mips::BI__builtin_msa_copy_u_h: 3123 case Mips::BI__builtin_msa_insve_h: 3124 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3125 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3126 // These intrinsics take an unsigned 2 bit immediate. 3127 case Mips::BI__builtin_msa_copy_s_w: 3128 case Mips::BI__builtin_msa_copy_u_w: 3129 case Mips::BI__builtin_msa_insve_w: 3130 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3131 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3132 // These intrinsics take an unsigned 1 bit immediate. 3133 case Mips::BI__builtin_msa_copy_s_d: 3134 case Mips::BI__builtin_msa_copy_u_d: 3135 case Mips::BI__builtin_msa_insve_d: 3136 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3137 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3138 // Memory offsets and immediate loads. 3139 // These intrinsics take a signed 10 bit immediate. 
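// For the vector load/store forms below, the byte offset must additionally be
// a multiple of the element size, enforced via SemaBuiltinConstantArgMultiple.
// Illustrative (hypothetical) example: __builtin_msa_ld_w(ptr, 6) is rejected
// because 6 is not a multiple of 4, while __builtin_msa_ld_w(ptr, 8) passes.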
3140 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3141 case Mips::BI__builtin_msa_ldi_h: 3142 case Mips::BI__builtin_msa_ldi_w: 3143 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3144 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3145 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3146 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3147 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3148 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3149 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3150 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3151 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3152 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3153 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3154 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3155 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3156 } 3157 3158 if (!m) 3159 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3160 3161 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3162 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3163 } 3164 3165 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3166 CallExpr *TheCall) { 3167 unsigned i = 0, l = 0, u = 0; 3168 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3169 BuiltinID == PPC::BI__builtin_divdeu || 3170 BuiltinID == PPC::BI__builtin_bpermd; 3171 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3172 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3173 BuiltinID == PPC::BI__builtin_divweu || 3174 BuiltinID == PPC::BI__builtin_divde || 3175 BuiltinID == PPC::BI__builtin_divdeu; 3176 3177 if (Is64BitBltin && !IsTarget64Bit) 3178 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3179 << TheCall->getSourceRange(); 3180 3181 if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) || 3182 (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd"))) 3183 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3184 << TheCall->getSourceRange(); 3185 3186 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3187 if (!TI.hasFeature("vsx")) 3188 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3189 << TheCall->getSourceRange(); 3190 return false; 3191 }; 3192 3193 switch (BuiltinID) { 3194 default: return false; 3195 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3196 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3197 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3198 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3199 case PPC::BI__builtin_altivec_dss: 3200 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3201 case PPC::BI__builtin_tbegin: 3202 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3203 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3204 case PPC::BI__builtin_tabortwc: 3205 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3206 case PPC::BI__builtin_tabortwci: 3207 case PPC::BI__builtin_tabortdci: 3208 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3209 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3210 case PPC::BI__builtin_altivec_dst: 3211 case PPC::BI__builtin_altivec_dstt: 3212 case PPC::BI__builtin_altivec_dstst: 
3213 case PPC::BI__builtin_altivec_dststt:
3214 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
3215 case PPC::BI__builtin_vsx_xxpermdi:
3216 case PPC::BI__builtin_vsx_xxsldwi:
3217 return SemaBuiltinVSX(TheCall);
3218 case PPC::BI__builtin_unpack_vector_int128:
3219 return SemaVSXCheck(TheCall) ||
3220 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3221 case PPC::BI__builtin_pack_vector_int128:
3222 return SemaVSXCheck(TheCall);
3223 case PPC::BI__builtin_altivec_vgnb:
3224 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
3225 case PPC::BI__builtin_vsx_xxeval:
3226 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
3227 case PPC::BI__builtin_altivec_vsldbi:
3228 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3229 case PPC::BI__builtin_altivec_vsrdbi:
3230 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
3231 case PPC::BI__builtin_vsx_xxpermx:
3232 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
3233 }
3234 return SemaBuiltinConstantArgRange(TheCall, i, l, u);
3235 }
3236
3237 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
3238 CallExpr *TheCall) {
3239 // Position of the memory order and scope arguments in the builtin.
3240 unsigned OrderIndex, ScopeIndex;
3241 switch (BuiltinID) {
3242 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
3243 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
3244 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
3245 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
3246 OrderIndex = 2;
3247 ScopeIndex = 3;
3248 break;
3249 case AMDGPU::BI__builtin_amdgcn_fence:
3250 OrderIndex = 0;
3251 ScopeIndex = 1;
3252 break;
3253 default:
3254 return false;
3255 }
3256
3257 ExprResult Arg = TheCall->getArg(OrderIndex);
3258 auto ArgExpr = Arg.get();
3259 Expr::EvalResult ArgResult;
3260
3261 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
3262 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
3263 << ArgExpr->getType();
3264 int ord = ArgResult.Val.getInt().getZExtValue();
3265
3266 // Check the validity of the memory ordering as per C11 / C++11's memory model.
3267 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
3268 case llvm::AtomicOrderingCABI::acquire:
3269 case llvm::AtomicOrderingCABI::release:
3270 case llvm::AtomicOrderingCABI::acq_rel:
3271 case llvm::AtomicOrderingCABI::seq_cst:
3272 break;
3273 default: {
3274 return Diag(ArgExpr->getBeginLoc(),
3275 diag::warn_atomic_op_has_invalid_memory_order)
3276 << ArgExpr->getSourceRange();
3277 }
3278 }
3279
3280 Arg = TheCall->getArg(ScopeIndex);
3281 ArgExpr = Arg.get();
3282 Expr::EvalResult ArgResult1;
3283 // Check that the sync scope is a constant literal.
3284 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen,
3285 Context))
3286 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
3287 << ArgExpr->getType();
3288
3289 return false;
3290 }
3291
3292 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
3293 CallExpr *TheCall) {
3294 if (BuiltinID == SystemZ::BI__builtin_tabort) {
3295 Expr *Arg = TheCall->getArg(0);
3296 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
3297 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
3298 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
3299 << Arg->getSourceRange();
3300 }
3301
3302 // For intrinsics which take an immediate value as part of the instruction,
3303 // range check them here.
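// In the switch below, i is the index of the immediate argument and [l, u] is
// the inclusive range it must lie in. For example, the second argument of
// __builtin_s390_lcbb (i = 1) must be a constant integer in [0, 15].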
3304 unsigned i = 0, l = 0, u = 0; 3305 switch (BuiltinID) { 3306 default: return false; 3307 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3308 case SystemZ::BI__builtin_s390_verimb: 3309 case SystemZ::BI__builtin_s390_verimh: 3310 case SystemZ::BI__builtin_s390_verimf: 3311 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3312 case SystemZ::BI__builtin_s390_vfaeb: 3313 case SystemZ::BI__builtin_s390_vfaeh: 3314 case SystemZ::BI__builtin_s390_vfaef: 3315 case SystemZ::BI__builtin_s390_vfaebs: 3316 case SystemZ::BI__builtin_s390_vfaehs: 3317 case SystemZ::BI__builtin_s390_vfaefs: 3318 case SystemZ::BI__builtin_s390_vfaezb: 3319 case SystemZ::BI__builtin_s390_vfaezh: 3320 case SystemZ::BI__builtin_s390_vfaezf: 3321 case SystemZ::BI__builtin_s390_vfaezbs: 3322 case SystemZ::BI__builtin_s390_vfaezhs: 3323 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3324 case SystemZ::BI__builtin_s390_vfisb: 3325 case SystemZ::BI__builtin_s390_vfidb: 3326 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3327 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3328 case SystemZ::BI__builtin_s390_vftcisb: 3329 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3330 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3331 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3332 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3333 case SystemZ::BI__builtin_s390_vstrcb: 3334 case SystemZ::BI__builtin_s390_vstrch: 3335 case SystemZ::BI__builtin_s390_vstrcf: 3336 case SystemZ::BI__builtin_s390_vstrczb: 3337 case SystemZ::BI__builtin_s390_vstrczh: 3338 case SystemZ::BI__builtin_s390_vstrczf: 3339 case SystemZ::BI__builtin_s390_vstrcbs: 3340 case SystemZ::BI__builtin_s390_vstrchs: 3341 case SystemZ::BI__builtin_s390_vstrcfs: 3342 case SystemZ::BI__builtin_s390_vstrczbs: 3343 case SystemZ::BI__builtin_s390_vstrczhs: 3344 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3345 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3346 case SystemZ::BI__builtin_s390_vfminsb: 3347 case SystemZ::BI__builtin_s390_vfmaxsb: 3348 case SystemZ::BI__builtin_s390_vfmindb: 3349 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3350 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3351 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3352 } 3353 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3354 } 3355 3356 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3357 /// This checks that the target supports __builtin_cpu_supports and 3358 /// that the string argument is constant and valid. 3359 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3360 CallExpr *TheCall) { 3361 Expr *Arg = TheCall->getArg(0); 3362 3363 // Check if the argument is a string literal. 3364 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3365 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3366 << Arg->getSourceRange(); 3367 3368 // Check the contents of the string. 3369 StringRef Feature = 3370 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3371 if (!TI.validateCpuSupports(Feature)) 3372 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3373 << Arg->getSourceRange(); 3374 return false; 3375 } 3376 3377 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 
3378 /// This checks that the target supports __builtin_cpu_is and 3379 /// that the string argument is constant and valid. 3380 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3381 Expr *Arg = TheCall->getArg(0); 3382 3383 // Check if the argument is a string literal. 3384 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3385 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3386 << Arg->getSourceRange(); 3387 3388 // Check the contents of the string. 3389 StringRef Feature = 3390 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3391 if (!TI.validateCpuIs(Feature)) 3392 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3393 << Arg->getSourceRange(); 3394 return false; 3395 } 3396 3397 // Check if the rounding mode is legal. 3398 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3399 // Indicates if this instruction has rounding control or just SAE. 3400 bool HasRC = false; 3401 3402 unsigned ArgNum = 0; 3403 switch (BuiltinID) { 3404 default: 3405 return false; 3406 case X86::BI__builtin_ia32_vcvttsd2si32: 3407 case X86::BI__builtin_ia32_vcvttsd2si64: 3408 case X86::BI__builtin_ia32_vcvttsd2usi32: 3409 case X86::BI__builtin_ia32_vcvttsd2usi64: 3410 case X86::BI__builtin_ia32_vcvttss2si32: 3411 case X86::BI__builtin_ia32_vcvttss2si64: 3412 case X86::BI__builtin_ia32_vcvttss2usi32: 3413 case X86::BI__builtin_ia32_vcvttss2usi64: 3414 ArgNum = 1; 3415 break; 3416 case X86::BI__builtin_ia32_maxpd512: 3417 case X86::BI__builtin_ia32_maxps512: 3418 case X86::BI__builtin_ia32_minpd512: 3419 case X86::BI__builtin_ia32_minps512: 3420 ArgNum = 2; 3421 break; 3422 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3423 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3424 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3425 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3426 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3427 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3428 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3429 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3430 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3431 case X86::BI__builtin_ia32_exp2pd_mask: 3432 case X86::BI__builtin_ia32_exp2ps_mask: 3433 case X86::BI__builtin_ia32_getexppd512_mask: 3434 case X86::BI__builtin_ia32_getexpps512_mask: 3435 case X86::BI__builtin_ia32_rcp28pd_mask: 3436 case X86::BI__builtin_ia32_rcp28ps_mask: 3437 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3438 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3439 case X86::BI__builtin_ia32_vcomisd: 3440 case X86::BI__builtin_ia32_vcomiss: 3441 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3442 ArgNum = 3; 3443 break; 3444 case X86::BI__builtin_ia32_cmppd512_mask: 3445 case X86::BI__builtin_ia32_cmpps512_mask: 3446 case X86::BI__builtin_ia32_cmpsd_mask: 3447 case X86::BI__builtin_ia32_cmpss_mask: 3448 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3449 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3450 case X86::BI__builtin_ia32_getexpss128_round_mask: 3451 case X86::BI__builtin_ia32_getmantpd512_mask: 3452 case X86::BI__builtin_ia32_getmantps512_mask: 3453 case X86::BI__builtin_ia32_maxsd_round_mask: 3454 case X86::BI__builtin_ia32_maxss_round_mask: 3455 case X86::BI__builtin_ia32_minsd_round_mask: 3456 case X86::BI__builtin_ia32_minss_round_mask: 3457 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3458 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3459 case X86::BI__builtin_ia32_reducepd512_mask: 3460 case X86::BI__builtin_ia32_reduceps512_mask: 3461 
case X86::BI__builtin_ia32_rndscalepd_mask: 3462 case X86::BI__builtin_ia32_rndscaleps_mask: 3463 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3464 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3465 ArgNum = 4; 3466 break; 3467 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3468 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3469 case X86::BI__builtin_ia32_fixupimmps512_mask: 3470 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3471 case X86::BI__builtin_ia32_fixupimmsd_mask: 3472 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3473 case X86::BI__builtin_ia32_fixupimmss_mask: 3474 case X86::BI__builtin_ia32_fixupimmss_maskz: 3475 case X86::BI__builtin_ia32_getmantsd_round_mask: 3476 case X86::BI__builtin_ia32_getmantss_round_mask: 3477 case X86::BI__builtin_ia32_rangepd512_mask: 3478 case X86::BI__builtin_ia32_rangeps512_mask: 3479 case X86::BI__builtin_ia32_rangesd128_round_mask: 3480 case X86::BI__builtin_ia32_rangess128_round_mask: 3481 case X86::BI__builtin_ia32_reducesd_mask: 3482 case X86::BI__builtin_ia32_reducess_mask: 3483 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3484 case X86::BI__builtin_ia32_rndscaless_round_mask: 3485 ArgNum = 5; 3486 break; 3487 case X86::BI__builtin_ia32_vcvtsd2si64: 3488 case X86::BI__builtin_ia32_vcvtsd2si32: 3489 case X86::BI__builtin_ia32_vcvtsd2usi32: 3490 case X86::BI__builtin_ia32_vcvtsd2usi64: 3491 case X86::BI__builtin_ia32_vcvtss2si32: 3492 case X86::BI__builtin_ia32_vcvtss2si64: 3493 case X86::BI__builtin_ia32_vcvtss2usi32: 3494 case X86::BI__builtin_ia32_vcvtss2usi64: 3495 case X86::BI__builtin_ia32_sqrtpd512: 3496 case X86::BI__builtin_ia32_sqrtps512: 3497 ArgNum = 1; 3498 HasRC = true; 3499 break; 3500 case X86::BI__builtin_ia32_addpd512: 3501 case X86::BI__builtin_ia32_addps512: 3502 case X86::BI__builtin_ia32_divpd512: 3503 case X86::BI__builtin_ia32_divps512: 3504 case X86::BI__builtin_ia32_mulpd512: 3505 case X86::BI__builtin_ia32_mulps512: 3506 case X86::BI__builtin_ia32_subpd512: 3507 case X86::BI__builtin_ia32_subps512: 3508 case X86::BI__builtin_ia32_cvtsi2sd64: 3509 case X86::BI__builtin_ia32_cvtsi2ss32: 3510 case X86::BI__builtin_ia32_cvtsi2ss64: 3511 case X86::BI__builtin_ia32_cvtusi2sd64: 3512 case X86::BI__builtin_ia32_cvtusi2ss32: 3513 case X86::BI__builtin_ia32_cvtusi2ss64: 3514 ArgNum = 2; 3515 HasRC = true; 3516 break; 3517 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3518 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3519 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3520 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3521 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3522 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3523 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3524 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3525 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3526 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3527 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3528 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3529 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3530 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3531 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3532 ArgNum = 3; 3533 HasRC = true; 3534 break; 3535 case X86::BI__builtin_ia32_addss_round_mask: 3536 case X86::BI__builtin_ia32_addsd_round_mask: 3537 case X86::BI__builtin_ia32_divss_round_mask: 3538 case X86::BI__builtin_ia32_divsd_round_mask: 3539 case X86::BI__builtin_ia32_mulss_round_mask: 3540 case X86::BI__builtin_ia32_mulsd_round_mask: 3541 case X86::BI__builtin_ia32_subss_round_mask: 3542 case X86::BI__builtin_ia32_subsd_round_mask: 3543 case 
X86::BI__builtin_ia32_scalefpd512_mask:
3544 case X86::BI__builtin_ia32_scalefps512_mask:
3545 case X86::BI__builtin_ia32_scalefsd_round_mask:
3546 case X86::BI__builtin_ia32_scalefss_round_mask:
3547 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
3548 case X86::BI__builtin_ia32_sqrtsd_round_mask:
3549 case X86::BI__builtin_ia32_sqrtss_round_mask:
3550 case X86::BI__builtin_ia32_vfmaddsd3_mask:
3551 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
3552 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
3553 case X86::BI__builtin_ia32_vfmaddss3_mask:
3554 case X86::BI__builtin_ia32_vfmaddss3_maskz:
3555 case X86::BI__builtin_ia32_vfmaddss3_mask3:
3556 case X86::BI__builtin_ia32_vfmaddpd512_mask:
3557 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
3558 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
3559 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
3560 case X86::BI__builtin_ia32_vfmaddps512_mask:
3561 case X86::BI__builtin_ia32_vfmaddps512_maskz:
3562 case X86::BI__builtin_ia32_vfmaddps512_mask3:
3563 case X86::BI__builtin_ia32_vfmsubps512_mask3:
3564 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
3565 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
3566 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
3567 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
3568 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
3569 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
3570 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
3571 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
3572 ArgNum = 4;
3573 HasRC = true;
3574 break;
3575 }
3576
3577 llvm::APSInt Result;
3578
3579 // We can't check the value of a dependent argument.
3580 Expr *Arg = TheCall->getArg(ArgNum);
3581 if (Arg->isTypeDependent() || Arg->isValueDependent())
3582 return false;
3583
3584 // Check constant-ness first.
3585 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3586 return true;
3587
3588 // Make sure the rounding mode is either ROUND_CUR_DIRECTION or the
3589 // ROUND_NO_EXC bit is set. If the intrinsic has rounding control (bits 1:0),
3590 // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
3591 // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
3592 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
3593 Result == 8/*ROUND_NO_EXC*/ ||
3594 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
3595 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
3596 return false;
3597
3598 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
3599 << Arg->getSourceRange();
3600 }
3601
3602 // Check if the gather/scatter scale is legal.
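// The scale operand is the byte multiplier applied to each index, so only the
// values 1, 2, 4 and 8 are meaningful. Illustrative (hypothetical) user code:
//   __m512d v = _mm512_i32gather_pd(idx, base, 3);   // rejected with
//                                                    // err_x86_builtin_invalid_scale
//   __m512d w = _mm512_i32gather_pd(idx, base, 8);   // accepted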
3603 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3604 CallExpr *TheCall) { 3605 unsigned ArgNum = 0; 3606 switch (BuiltinID) { 3607 default: 3608 return false; 3609 case X86::BI__builtin_ia32_gatherpfdpd: 3610 case X86::BI__builtin_ia32_gatherpfdps: 3611 case X86::BI__builtin_ia32_gatherpfqpd: 3612 case X86::BI__builtin_ia32_gatherpfqps: 3613 case X86::BI__builtin_ia32_scatterpfdpd: 3614 case X86::BI__builtin_ia32_scatterpfdps: 3615 case X86::BI__builtin_ia32_scatterpfqpd: 3616 case X86::BI__builtin_ia32_scatterpfqps: 3617 ArgNum = 3; 3618 break; 3619 case X86::BI__builtin_ia32_gatherd_pd: 3620 case X86::BI__builtin_ia32_gatherd_pd256: 3621 case X86::BI__builtin_ia32_gatherq_pd: 3622 case X86::BI__builtin_ia32_gatherq_pd256: 3623 case X86::BI__builtin_ia32_gatherd_ps: 3624 case X86::BI__builtin_ia32_gatherd_ps256: 3625 case X86::BI__builtin_ia32_gatherq_ps: 3626 case X86::BI__builtin_ia32_gatherq_ps256: 3627 case X86::BI__builtin_ia32_gatherd_q: 3628 case X86::BI__builtin_ia32_gatherd_q256: 3629 case X86::BI__builtin_ia32_gatherq_q: 3630 case X86::BI__builtin_ia32_gatherq_q256: 3631 case X86::BI__builtin_ia32_gatherd_d: 3632 case X86::BI__builtin_ia32_gatherd_d256: 3633 case X86::BI__builtin_ia32_gatherq_d: 3634 case X86::BI__builtin_ia32_gatherq_d256: 3635 case X86::BI__builtin_ia32_gather3div2df: 3636 case X86::BI__builtin_ia32_gather3div2di: 3637 case X86::BI__builtin_ia32_gather3div4df: 3638 case X86::BI__builtin_ia32_gather3div4di: 3639 case X86::BI__builtin_ia32_gather3div4sf: 3640 case X86::BI__builtin_ia32_gather3div4si: 3641 case X86::BI__builtin_ia32_gather3div8sf: 3642 case X86::BI__builtin_ia32_gather3div8si: 3643 case X86::BI__builtin_ia32_gather3siv2df: 3644 case X86::BI__builtin_ia32_gather3siv2di: 3645 case X86::BI__builtin_ia32_gather3siv4df: 3646 case X86::BI__builtin_ia32_gather3siv4di: 3647 case X86::BI__builtin_ia32_gather3siv4sf: 3648 case X86::BI__builtin_ia32_gather3siv4si: 3649 case X86::BI__builtin_ia32_gather3siv8sf: 3650 case X86::BI__builtin_ia32_gather3siv8si: 3651 case X86::BI__builtin_ia32_gathersiv8df: 3652 case X86::BI__builtin_ia32_gathersiv16sf: 3653 case X86::BI__builtin_ia32_gatherdiv8df: 3654 case X86::BI__builtin_ia32_gatherdiv16sf: 3655 case X86::BI__builtin_ia32_gathersiv8di: 3656 case X86::BI__builtin_ia32_gathersiv16si: 3657 case X86::BI__builtin_ia32_gatherdiv8di: 3658 case X86::BI__builtin_ia32_gatherdiv16si: 3659 case X86::BI__builtin_ia32_scatterdiv2df: 3660 case X86::BI__builtin_ia32_scatterdiv2di: 3661 case X86::BI__builtin_ia32_scatterdiv4df: 3662 case X86::BI__builtin_ia32_scatterdiv4di: 3663 case X86::BI__builtin_ia32_scatterdiv4sf: 3664 case X86::BI__builtin_ia32_scatterdiv4si: 3665 case X86::BI__builtin_ia32_scatterdiv8sf: 3666 case X86::BI__builtin_ia32_scatterdiv8si: 3667 case X86::BI__builtin_ia32_scattersiv2df: 3668 case X86::BI__builtin_ia32_scattersiv2di: 3669 case X86::BI__builtin_ia32_scattersiv4df: 3670 case X86::BI__builtin_ia32_scattersiv4di: 3671 case X86::BI__builtin_ia32_scattersiv4sf: 3672 case X86::BI__builtin_ia32_scattersiv4si: 3673 case X86::BI__builtin_ia32_scattersiv8sf: 3674 case X86::BI__builtin_ia32_scattersiv8si: 3675 case X86::BI__builtin_ia32_scattersiv8df: 3676 case X86::BI__builtin_ia32_scattersiv16sf: 3677 case X86::BI__builtin_ia32_scatterdiv8df: 3678 case X86::BI__builtin_ia32_scatterdiv16sf: 3679 case X86::BI__builtin_ia32_scattersiv8di: 3680 case X86::BI__builtin_ia32_scattersiv16si: 3681 case X86::BI__builtin_ia32_scatterdiv8di: 3682 case X86::BI__builtin_ia32_scatterdiv16si: 3683 
ArgNum = 4;
3684 break;
3685 }
3686
3687 llvm::APSInt Result;
3688
3689 // We can't check the value of a dependent argument.
3690 Expr *Arg = TheCall->getArg(ArgNum);
3691 if (Arg->isTypeDependent() || Arg->isValueDependent())
3692 return false;
3693
3694 // Check constant-ness first.
3695 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3696 return true;
3697
3698 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
3699 return false;
3700
3701 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
3702 << Arg->getSourceRange();
3703 }
3704
3705 enum { TileRegLow = 0, TileRegHigh = 7 };
3706
3707 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
3708 ArrayRef<int> ArgNums) {
3709 for (int ArgNum : ArgNums) {
3710 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
3711 return true;
3712 }
3713 return false;
3714 }
3715
3716 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
3717 ArrayRef<int> ArgNums) {
3718 // The maximum number of tile registers is TileRegHigh + 1, so use one bit
3719 // per register in the bitset to track which registers have already been used.
3720 std::bitset<TileRegHigh + 1> ArgValues;
3721 for (int ArgNum : ArgNums) {
3722 Expr *Arg = TheCall->getArg(ArgNum);
3723 if (Arg->isTypeDependent() || Arg->isValueDependent())
3724 continue;
3725
3726 llvm::APSInt Result;
3727 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
3728 return true;
3729 int ArgExtValue = Result.getExtValue();
3730 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
3731 "Incorrect tile register num.");
3732 if (ArgValues.test(ArgExtValue))
3733 return Diag(TheCall->getBeginLoc(),
3734 diag::err_x86_builtin_tile_arg_duplicate)
3735 << TheCall->getArg(ArgNum)->getSourceRange();
3736 ArgValues.set(ArgExtValue);
3737 }
3738 return false;
3739 }
3740
3741 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
3742 ArrayRef<int> ArgNums) {
3743 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
3744 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
3745 }
3746
3747 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
3748 switch (BuiltinID) {
3749 default:
3750 return false;
3751 case X86::BI__builtin_ia32_tileloadd64:
3752 case X86::BI__builtin_ia32_tileloaddt164:
3753 case X86::BI__builtin_ia32_tilestored64:
3754 case X86::BI__builtin_ia32_tilezero:
3755 return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
3756 case X86::BI__builtin_ia32_tdpbssd:
3757 case X86::BI__builtin_ia32_tdpbsud:
3758 case X86::BI__builtin_ia32_tdpbusd:
3759 case X86::BI__builtin_ia32_tdpbuud:
3760 case X86::BI__builtin_ia32_tdpbf16ps:
3761 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
3762 }
3763 }
3764 static bool isX86_32Builtin(unsigned BuiltinID) {
3765 // These builtins only work on x86-32 targets.
3766 switch (BuiltinID) {
3767 case X86::BI__builtin_ia32_readeflags_u32:
3768 case X86::BI__builtin_ia32_writeeflags_u32:
3769 return true;
3770 }
3771
3772 return false;
3773 }
3774
3775 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3776 CallExpr *TheCall) {
3777 if (BuiltinID == X86::BI__builtin_cpu_supports)
3778 return SemaBuiltinCpuSupports(*this, TI, TheCall);
3779
3780 if (BuiltinID == X86::BI__builtin_cpu_is)
3781 return SemaBuiltinCpuIs(*this, TI, TheCall);
3782
3783 // Check for 32-bit only builtins on a 64-bit target.
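// For example, __builtin_ia32_readeflags_u32 (see isX86_32Builtin above) is
// rejected with err_32_bit_builtin_64_bit_tgt when compiling for x86-64.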
const llvm::Triple &TT = TI.getTriple();
3785 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
3786 return Diag(TheCall->getCallee()->getBeginLoc(),
3787 diag::err_32_bit_builtin_64_bit_tgt);
3788
3789 // If the intrinsic has rounding or SAE, make sure it's valid.
3790 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
3791 return true;
3792
3793 // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
3794 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
3795 return true;
3796
3797 // If the intrinsic has tile arguments, make sure they are valid.
3798 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
3799 return true;
3800
3801 // For intrinsics which take an immediate value as part of the instruction,
3802 // range check them here.
3803 int i = 0, l = 0, u = 0;
3804 switch (BuiltinID) {
3805 default:
3806 return false;
3807 case X86::BI__builtin_ia32_vec_ext_v2si:
3808 case X86::BI__builtin_ia32_vec_ext_v2di:
3809 case X86::BI__builtin_ia32_vextractf128_pd256:
3810 case X86::BI__builtin_ia32_vextractf128_ps256:
3811 case X86::BI__builtin_ia32_vextractf128_si256:
3812 case X86::BI__builtin_ia32_extract128i256:
3813 case X86::BI__builtin_ia32_extractf64x4_mask:
3814 case X86::BI__builtin_ia32_extracti64x4_mask:
3815 case X86::BI__builtin_ia32_extractf32x8_mask:
3816 case X86::BI__builtin_ia32_extracti32x8_mask:
3817 case X86::BI__builtin_ia32_extractf64x2_256_mask:
3818 case X86::BI__builtin_ia32_extracti64x2_256_mask:
3819 case X86::BI__builtin_ia32_extractf32x4_256_mask:
3820 case X86::BI__builtin_ia32_extracti32x4_256_mask:
3821 i = 1; l = 0; u = 1;
3822 break;
3823 case X86::BI__builtin_ia32_vec_set_v2di:
3824 case X86::BI__builtin_ia32_vinsertf128_pd256:
3825 case X86::BI__builtin_ia32_vinsertf128_ps256:
3826 case X86::BI__builtin_ia32_vinsertf128_si256:
3827 case X86::BI__builtin_ia32_insert128i256:
3828 case X86::BI__builtin_ia32_insertf32x8:
3829 case X86::BI__builtin_ia32_inserti32x8:
3830 case X86::BI__builtin_ia32_insertf64x4:
3831 case X86::BI__builtin_ia32_inserti64x4:
3832 case X86::BI__builtin_ia32_insertf64x2_256:
3833 case X86::BI__builtin_ia32_inserti64x2_256:
3834 case X86::BI__builtin_ia32_insertf32x4_256:
3835 case X86::BI__builtin_ia32_inserti32x4_256:
3836 i = 2; l = 0; u = 1;
3837 break;
3838 case X86::BI__builtin_ia32_vpermilpd:
3839 case X86::BI__builtin_ia32_vec_ext_v4hi:
3840 case X86::BI__builtin_ia32_vec_ext_v4si:
3841 case X86::BI__builtin_ia32_vec_ext_v4sf:
3842 case X86::BI__builtin_ia32_vec_ext_v4di:
3843 case X86::BI__builtin_ia32_extractf32x4_mask:
3844 case X86::BI__builtin_ia32_extracti32x4_mask:
3845 case X86::BI__builtin_ia32_extractf64x2_512_mask:
3846 case X86::BI__builtin_ia32_extracti64x2_512_mask:
3847 i = 1; l = 0; u = 3;
3848 break;
3849 case X86::BI_mm_prefetch:
3850 case X86::BI__builtin_ia32_vec_ext_v8hi:
3851 case X86::BI__builtin_ia32_vec_ext_v8si:
3852 i = 1; l = 0; u = 7;
3853 break;
3854 case X86::BI__builtin_ia32_sha1rnds4:
3855 case X86::BI__builtin_ia32_blendpd:
3856 case X86::BI__builtin_ia32_shufpd:
3857 case X86::BI__builtin_ia32_vec_set_v4hi:
3858 case X86::BI__builtin_ia32_vec_set_v4si:
3859 case X86::BI__builtin_ia32_vec_set_v4di:
3860 case X86::BI__builtin_ia32_shuf_f32x4_256:
3861 case X86::BI__builtin_ia32_shuf_f64x2_256:
3862 case X86::BI__builtin_ia32_shuf_i32x4_256:
3863 case X86::BI__builtin_ia32_shuf_i64x2_256:
3864 case X86::BI__builtin_ia32_insertf64x2_512:
3865 case X86::BI__builtin_ia32_inserti64x2_512:
3866 case X86::BI__builtin_ia32_insertf32x4:
3867
case X86::BI__builtin_ia32_inserti32x4: 3868 i = 2; l = 0; u = 3; 3869 break; 3870 case X86::BI__builtin_ia32_vpermil2pd: 3871 case X86::BI__builtin_ia32_vpermil2pd256: 3872 case X86::BI__builtin_ia32_vpermil2ps: 3873 case X86::BI__builtin_ia32_vpermil2ps256: 3874 i = 3; l = 0; u = 3; 3875 break; 3876 case X86::BI__builtin_ia32_cmpb128_mask: 3877 case X86::BI__builtin_ia32_cmpw128_mask: 3878 case X86::BI__builtin_ia32_cmpd128_mask: 3879 case X86::BI__builtin_ia32_cmpq128_mask: 3880 case X86::BI__builtin_ia32_cmpb256_mask: 3881 case X86::BI__builtin_ia32_cmpw256_mask: 3882 case X86::BI__builtin_ia32_cmpd256_mask: 3883 case X86::BI__builtin_ia32_cmpq256_mask: 3884 case X86::BI__builtin_ia32_cmpb512_mask: 3885 case X86::BI__builtin_ia32_cmpw512_mask: 3886 case X86::BI__builtin_ia32_cmpd512_mask: 3887 case X86::BI__builtin_ia32_cmpq512_mask: 3888 case X86::BI__builtin_ia32_ucmpb128_mask: 3889 case X86::BI__builtin_ia32_ucmpw128_mask: 3890 case X86::BI__builtin_ia32_ucmpd128_mask: 3891 case X86::BI__builtin_ia32_ucmpq128_mask: 3892 case X86::BI__builtin_ia32_ucmpb256_mask: 3893 case X86::BI__builtin_ia32_ucmpw256_mask: 3894 case X86::BI__builtin_ia32_ucmpd256_mask: 3895 case X86::BI__builtin_ia32_ucmpq256_mask: 3896 case X86::BI__builtin_ia32_ucmpb512_mask: 3897 case X86::BI__builtin_ia32_ucmpw512_mask: 3898 case X86::BI__builtin_ia32_ucmpd512_mask: 3899 case X86::BI__builtin_ia32_ucmpq512_mask: 3900 case X86::BI__builtin_ia32_vpcomub: 3901 case X86::BI__builtin_ia32_vpcomuw: 3902 case X86::BI__builtin_ia32_vpcomud: 3903 case X86::BI__builtin_ia32_vpcomuq: 3904 case X86::BI__builtin_ia32_vpcomb: 3905 case X86::BI__builtin_ia32_vpcomw: 3906 case X86::BI__builtin_ia32_vpcomd: 3907 case X86::BI__builtin_ia32_vpcomq: 3908 case X86::BI__builtin_ia32_vec_set_v8hi: 3909 case X86::BI__builtin_ia32_vec_set_v8si: 3910 i = 2; l = 0; u = 7; 3911 break; 3912 case X86::BI__builtin_ia32_vpermilpd256: 3913 case X86::BI__builtin_ia32_roundps: 3914 case X86::BI__builtin_ia32_roundpd: 3915 case X86::BI__builtin_ia32_roundps256: 3916 case X86::BI__builtin_ia32_roundpd256: 3917 case X86::BI__builtin_ia32_getmantpd128_mask: 3918 case X86::BI__builtin_ia32_getmantpd256_mask: 3919 case X86::BI__builtin_ia32_getmantps128_mask: 3920 case X86::BI__builtin_ia32_getmantps256_mask: 3921 case X86::BI__builtin_ia32_getmantpd512_mask: 3922 case X86::BI__builtin_ia32_getmantps512_mask: 3923 case X86::BI__builtin_ia32_vec_ext_v16qi: 3924 case X86::BI__builtin_ia32_vec_ext_v16hi: 3925 i = 1; l = 0; u = 15; 3926 break; 3927 case X86::BI__builtin_ia32_pblendd128: 3928 case X86::BI__builtin_ia32_blendps: 3929 case X86::BI__builtin_ia32_blendpd256: 3930 case X86::BI__builtin_ia32_shufpd256: 3931 case X86::BI__builtin_ia32_roundss: 3932 case X86::BI__builtin_ia32_roundsd: 3933 case X86::BI__builtin_ia32_rangepd128_mask: 3934 case X86::BI__builtin_ia32_rangepd256_mask: 3935 case X86::BI__builtin_ia32_rangepd512_mask: 3936 case X86::BI__builtin_ia32_rangeps128_mask: 3937 case X86::BI__builtin_ia32_rangeps256_mask: 3938 case X86::BI__builtin_ia32_rangeps512_mask: 3939 case X86::BI__builtin_ia32_getmantsd_round_mask: 3940 case X86::BI__builtin_ia32_getmantss_round_mask: 3941 case X86::BI__builtin_ia32_vec_set_v16qi: 3942 case X86::BI__builtin_ia32_vec_set_v16hi: 3943 i = 2; l = 0; u = 15; 3944 break; 3945 case X86::BI__builtin_ia32_vec_ext_v32qi: 3946 i = 1; l = 0; u = 31; 3947 break; 3948 case X86::BI__builtin_ia32_cmpps: 3949 case X86::BI__builtin_ia32_cmpss: 3950 case X86::BI__builtin_ia32_cmppd: 3951 case X86::BI__builtin_ia32_cmpsd: 
3952 case X86::BI__builtin_ia32_cmpps256: 3953 case X86::BI__builtin_ia32_cmppd256: 3954 case X86::BI__builtin_ia32_cmpps128_mask: 3955 case X86::BI__builtin_ia32_cmppd128_mask: 3956 case X86::BI__builtin_ia32_cmpps256_mask: 3957 case X86::BI__builtin_ia32_cmppd256_mask: 3958 case X86::BI__builtin_ia32_cmpps512_mask: 3959 case X86::BI__builtin_ia32_cmppd512_mask: 3960 case X86::BI__builtin_ia32_cmpsd_mask: 3961 case X86::BI__builtin_ia32_cmpss_mask: 3962 case X86::BI__builtin_ia32_vec_set_v32qi: 3963 i = 2; l = 0; u = 31; 3964 break; 3965 case X86::BI__builtin_ia32_permdf256: 3966 case X86::BI__builtin_ia32_permdi256: 3967 case X86::BI__builtin_ia32_permdf512: 3968 case X86::BI__builtin_ia32_permdi512: 3969 case X86::BI__builtin_ia32_vpermilps: 3970 case X86::BI__builtin_ia32_vpermilps256: 3971 case X86::BI__builtin_ia32_vpermilpd512: 3972 case X86::BI__builtin_ia32_vpermilps512: 3973 case X86::BI__builtin_ia32_pshufd: 3974 case X86::BI__builtin_ia32_pshufd256: 3975 case X86::BI__builtin_ia32_pshufd512: 3976 case X86::BI__builtin_ia32_pshufhw: 3977 case X86::BI__builtin_ia32_pshufhw256: 3978 case X86::BI__builtin_ia32_pshufhw512: 3979 case X86::BI__builtin_ia32_pshuflw: 3980 case X86::BI__builtin_ia32_pshuflw256: 3981 case X86::BI__builtin_ia32_pshuflw512: 3982 case X86::BI__builtin_ia32_vcvtps2ph: 3983 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3984 case X86::BI__builtin_ia32_vcvtps2ph256: 3985 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 3986 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 3987 case X86::BI__builtin_ia32_rndscaleps_128_mask: 3988 case X86::BI__builtin_ia32_rndscalepd_128_mask: 3989 case X86::BI__builtin_ia32_rndscaleps_256_mask: 3990 case X86::BI__builtin_ia32_rndscalepd_256_mask: 3991 case X86::BI__builtin_ia32_rndscaleps_mask: 3992 case X86::BI__builtin_ia32_rndscalepd_mask: 3993 case X86::BI__builtin_ia32_reducepd128_mask: 3994 case X86::BI__builtin_ia32_reducepd256_mask: 3995 case X86::BI__builtin_ia32_reducepd512_mask: 3996 case X86::BI__builtin_ia32_reduceps128_mask: 3997 case X86::BI__builtin_ia32_reduceps256_mask: 3998 case X86::BI__builtin_ia32_reduceps512_mask: 3999 case X86::BI__builtin_ia32_prold512: 4000 case X86::BI__builtin_ia32_prolq512: 4001 case X86::BI__builtin_ia32_prold128: 4002 case X86::BI__builtin_ia32_prold256: 4003 case X86::BI__builtin_ia32_prolq128: 4004 case X86::BI__builtin_ia32_prolq256: 4005 case X86::BI__builtin_ia32_prord512: 4006 case X86::BI__builtin_ia32_prorq512: 4007 case X86::BI__builtin_ia32_prord128: 4008 case X86::BI__builtin_ia32_prord256: 4009 case X86::BI__builtin_ia32_prorq128: 4010 case X86::BI__builtin_ia32_prorq256: 4011 case X86::BI__builtin_ia32_fpclasspd128_mask: 4012 case X86::BI__builtin_ia32_fpclasspd256_mask: 4013 case X86::BI__builtin_ia32_fpclassps128_mask: 4014 case X86::BI__builtin_ia32_fpclassps256_mask: 4015 case X86::BI__builtin_ia32_fpclassps512_mask: 4016 case X86::BI__builtin_ia32_fpclasspd512_mask: 4017 case X86::BI__builtin_ia32_fpclasssd_mask: 4018 case X86::BI__builtin_ia32_fpclassss_mask: 4019 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4020 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4021 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4022 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4023 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4024 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4025 case X86::BI__builtin_ia32_kshiftliqi: 4026 case X86::BI__builtin_ia32_kshiftlihi: 4027 case X86::BI__builtin_ia32_kshiftlisi: 4028 case X86::BI__builtin_ia32_kshiftlidi: 4029 case 
X86::BI__builtin_ia32_kshiftriqi: 4030 case X86::BI__builtin_ia32_kshiftrihi: 4031 case X86::BI__builtin_ia32_kshiftrisi: 4032 case X86::BI__builtin_ia32_kshiftridi: 4033 i = 1; l = 0; u = 255; 4034 break; 4035 case X86::BI__builtin_ia32_vperm2f128_pd256: 4036 case X86::BI__builtin_ia32_vperm2f128_ps256: 4037 case X86::BI__builtin_ia32_vperm2f128_si256: 4038 case X86::BI__builtin_ia32_permti256: 4039 case X86::BI__builtin_ia32_pblendw128: 4040 case X86::BI__builtin_ia32_pblendw256: 4041 case X86::BI__builtin_ia32_blendps256: 4042 case X86::BI__builtin_ia32_pblendd256: 4043 case X86::BI__builtin_ia32_palignr128: 4044 case X86::BI__builtin_ia32_palignr256: 4045 case X86::BI__builtin_ia32_palignr512: 4046 case X86::BI__builtin_ia32_alignq512: 4047 case X86::BI__builtin_ia32_alignd512: 4048 case X86::BI__builtin_ia32_alignd128: 4049 case X86::BI__builtin_ia32_alignd256: 4050 case X86::BI__builtin_ia32_alignq128: 4051 case X86::BI__builtin_ia32_alignq256: 4052 case X86::BI__builtin_ia32_vcomisd: 4053 case X86::BI__builtin_ia32_vcomiss: 4054 case X86::BI__builtin_ia32_shuf_f32x4: 4055 case X86::BI__builtin_ia32_shuf_f64x2: 4056 case X86::BI__builtin_ia32_shuf_i32x4: 4057 case X86::BI__builtin_ia32_shuf_i64x2: 4058 case X86::BI__builtin_ia32_shufpd512: 4059 case X86::BI__builtin_ia32_shufps: 4060 case X86::BI__builtin_ia32_shufps256: 4061 case X86::BI__builtin_ia32_shufps512: 4062 case X86::BI__builtin_ia32_dbpsadbw128: 4063 case X86::BI__builtin_ia32_dbpsadbw256: 4064 case X86::BI__builtin_ia32_dbpsadbw512: 4065 case X86::BI__builtin_ia32_vpshldd128: 4066 case X86::BI__builtin_ia32_vpshldd256: 4067 case X86::BI__builtin_ia32_vpshldd512: 4068 case X86::BI__builtin_ia32_vpshldq128: 4069 case X86::BI__builtin_ia32_vpshldq256: 4070 case X86::BI__builtin_ia32_vpshldq512: 4071 case X86::BI__builtin_ia32_vpshldw128: 4072 case X86::BI__builtin_ia32_vpshldw256: 4073 case X86::BI__builtin_ia32_vpshldw512: 4074 case X86::BI__builtin_ia32_vpshrdd128: 4075 case X86::BI__builtin_ia32_vpshrdd256: 4076 case X86::BI__builtin_ia32_vpshrdd512: 4077 case X86::BI__builtin_ia32_vpshrdq128: 4078 case X86::BI__builtin_ia32_vpshrdq256: 4079 case X86::BI__builtin_ia32_vpshrdq512: 4080 case X86::BI__builtin_ia32_vpshrdw128: 4081 case X86::BI__builtin_ia32_vpshrdw256: 4082 case X86::BI__builtin_ia32_vpshrdw512: 4083 i = 2; l = 0; u = 255; 4084 break; 4085 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4086 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4087 case X86::BI__builtin_ia32_fixupimmps512_mask: 4088 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4089 case X86::BI__builtin_ia32_fixupimmsd_mask: 4090 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4091 case X86::BI__builtin_ia32_fixupimmss_mask: 4092 case X86::BI__builtin_ia32_fixupimmss_maskz: 4093 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4094 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4095 case X86::BI__builtin_ia32_fixupimmpd256_mask: 4096 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4097 case X86::BI__builtin_ia32_fixupimmps128_mask: 4098 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4099 case X86::BI__builtin_ia32_fixupimmps256_mask: 4100 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4101 case X86::BI__builtin_ia32_pternlogd512_mask: 4102 case X86::BI__builtin_ia32_pternlogd512_maskz: 4103 case X86::BI__builtin_ia32_pternlogq512_mask: 4104 case X86::BI__builtin_ia32_pternlogq512_maskz: 4105 case X86::BI__builtin_ia32_pternlogd128_mask: 4106 case X86::BI__builtin_ia32_pternlogd128_maskz: 4107 case X86::BI__builtin_ia32_pternlogd256_mask: 4108 
case X86::BI__builtin_ia32_pternlogd256_maskz:
4109 case X86::BI__builtin_ia32_pternlogq128_mask:
4110 case X86::BI__builtin_ia32_pternlogq128_maskz:
4111 case X86::BI__builtin_ia32_pternlogq256_mask:
4112 case X86::BI__builtin_ia32_pternlogq256_maskz:
4113 i = 3; l = 0; u = 255;
4114 break;
4115 case X86::BI__builtin_ia32_gatherpfdpd:
4116 case X86::BI__builtin_ia32_gatherpfdps:
4117 case X86::BI__builtin_ia32_gatherpfqpd:
4118 case X86::BI__builtin_ia32_gatherpfqps:
4119 case X86::BI__builtin_ia32_scatterpfdpd:
4120 case X86::BI__builtin_ia32_scatterpfdps:
4121 case X86::BI__builtin_ia32_scatterpfqpd:
4122 case X86::BI__builtin_ia32_scatterpfqps:
4123 i = 4; l = 2; u = 3;
4124 break;
4125 case X86::BI__builtin_ia32_reducesd_mask:
4126 case X86::BI__builtin_ia32_reducess_mask:
4127 case X86::BI__builtin_ia32_rndscalesd_round_mask:
4128 case X86::BI__builtin_ia32_rndscaless_round_mask:
4129 i = 4; l = 0; u = 255;
4130 break;
4131 }
4132
4133 // Note that we don't force a hard error on the range check here, allowing
4134 // template-generated or macro-generated dead code to potentially have out-of-
4135 // range values. That code still needs to be able to code-generate, but it
4136 // doesn't need to make any sense. We use a warning that defaults to an error.
4137 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
4138 }
4139
4140 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
4141 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
4142 /// Returns true when the format fits the function and the FormatStringInfo has
4143 /// been populated.
4144 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4145 FormatStringInfo *FSI) {
4146 FSI->HasVAListArg = Format->getFirstArg() == 0;
4147 FSI->FormatIdx = Format->getFormatIdx() - 1;
4148 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4149
4150 // The way the format attribute works in GCC, the implicit this argument
4151 // of member functions is counted. However, it doesn't appear in our own
4152 // lists, so decrement format_idx in that case.
4153 if (IsCXXMember) {
4154 if (FSI->FormatIdx == 0)
4155 return false;
4156 --FSI->FormatIdx;
4157 if (FSI->FirstDataArg != 0)
4158 --FSI->FirstDataArg;
4159 }
4160 return true;
4161 }
4162
4163 /// Checks if the given expression evaluates to null.
4164 ///
4165 /// Returns true if the value evaluates to null.
4166 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4167 // If the expression has non-null type, it doesn't evaluate to null.
4168 if (auto nullability
4169 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4170 if (*nullability == NullabilityKind::NonNull)
4171 return false;
4172 }
4173
4174 // As a special case, transparent unions initialized with zero are
4175 // considered null for the purposes of the nonnull attribute.
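// Illustrative (hypothetical) C code for this special case:
//   typedef union { void *ptr; } __attribute__((transparent_union)) opt_arg;
//   f((opt_arg){0});   // the zero initializer makes the argument count as
//                      // null when f's parameter is annotated as nonnull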
4176 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
4177 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
4178 if (const CompoundLiteralExpr *CLE =
4179 dyn_cast<CompoundLiteralExpr>(Expr))
4180 if (const InitListExpr *ILE =
4181 dyn_cast<InitListExpr>(CLE->getInitializer()))
4182 Expr = ILE->getInit(0);
4183 }
4184
4185 bool Result;
4186 return (!Expr->isValueDependent() &&
4187 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
4188 !Result);
4189 }
4190
4191 static void CheckNonNullArgument(Sema &S,
4192 const Expr *ArgExpr,
4193 SourceLocation CallSiteLoc) {
4194 if (CheckNonNullExpr(S, ArgExpr))
4195 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
4196 S.PDiag(diag::warn_null_arg)
4197 << ArgExpr->getSourceRange());
4198 }
4199
4200 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4201 FormatStringInfo FSI;
4202 if ((GetFormatStringType(Format) == FST_NSString) &&
4203 getFormatStringInfo(Format, false, &FSI)) {
4204 Idx = FSI.FormatIdx;
4205 return true;
4206 }
4207 return false;
4208 }
4209
4210 /// Diagnose use of a %s directive in an NSString which is being passed
4211 /// as a format string to a formatting method.
4212 static void
4213 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
4214 const NamedDecl *FDecl,
4215 Expr **Args,
4216 unsigned NumArgs) {
4217 unsigned Idx = 0;
4218 bool Format = false;
4219 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
4220 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
4221 Idx = 2;
4222 Format = true;
4223 }
4224 else
4225 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4226 if (S.GetFormatNSStringIdx(I, Idx)) {
4227 Format = true;
4228 break;
4229 }
4230 }
4231 if (!Format || NumArgs <= Idx)
4232 return;
4233 const Expr *FormatExpr = Args[Idx];
4234 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
4235 FormatExpr = CSCE->getSubExpr();
4236 const StringLiteral *FormatString;
4237 if (const ObjCStringLiteral *OSL =
4238 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
4239 FormatString = OSL->getString();
4240 else
4241 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
4242 if (!FormatString)
4243 return;
4244 if (S.FormatStringHasSArg(FormatString)) {
4245 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
4246 << "%s" << 1 << 1;
4247 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
4248 << FDecl->getDeclName();
4249 }
4250 }
4251
4252 /// Determine whether the given type has a non-null nullability annotation.
4253 static bool isNonNullType(ASTContext &ctx, QualType type) {
4254 if (auto nullability = type->getNullability(ctx))
4255 return *nullability == NullabilityKind::NonNull;
4256
4257 return false;
4258 }
4259
4260 static void CheckNonNullArguments(Sema &S,
4261 const NamedDecl *FDecl,
4262 const FunctionProtoType *Proto,
4263 ArrayRef<const Expr *> Args,
4264 SourceLocation CallSiteLoc) {
4265 assert((FDecl || Proto) && "Need a function declaration or prototype");
4266
4267 // Already checked by the constant evaluator.
4268 if (S.isConstantEvaluated())
4269 return;
4270 // Check the attributes attached to the method/function itself.
4271 llvm::SmallBitVector NonNullArgs;
4272 if (FDecl) {
4273 // Handle the nonnull attribute on the function/method declaration itself.
4274 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
4275 if (!NonNull->args_size()) {
4276 // Easy case: all pointer arguments are nonnull.
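// e.g. (illustrative) for __attribute__((nonnull)) void f(int *a, char *b);
// every pointer argument of the call is checked for a null value.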
4277 for (const auto *Arg : Args) 4278 if (S.isValidPointerAttrType(Arg->getType())) 4279 CheckNonNullArgument(S, Arg, CallSiteLoc); 4280 return; 4281 } 4282 4283 for (const ParamIdx &Idx : NonNull->args()) { 4284 unsigned IdxAST = Idx.getASTIndex(); 4285 if (IdxAST >= Args.size()) 4286 continue; 4287 if (NonNullArgs.empty()) 4288 NonNullArgs.resize(Args.size()); 4289 NonNullArgs.set(IdxAST); 4290 } 4291 } 4292 } 4293 4294 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4295 // Handle the nonnull attribute on the parameters of the 4296 // function/method. 4297 ArrayRef<ParmVarDecl*> parms; 4298 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4299 parms = FD->parameters(); 4300 else 4301 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4302 4303 unsigned ParamIndex = 0; 4304 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4305 I != E; ++I, ++ParamIndex) { 4306 const ParmVarDecl *PVD = *I; 4307 if (PVD->hasAttr<NonNullAttr>() || 4308 isNonNullType(S.Context, PVD->getType())) { 4309 if (NonNullArgs.empty()) 4310 NonNullArgs.resize(Args.size()); 4311 4312 NonNullArgs.set(ParamIndex); 4313 } 4314 } 4315 } else { 4316 // If we have a non-function, non-method declaration but no 4317 // function prototype, try to dig out the function prototype. 4318 if (!Proto) { 4319 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4320 QualType type = VD->getType().getNonReferenceType(); 4321 if (auto pointerType = type->getAs<PointerType>()) 4322 type = pointerType->getPointeeType(); 4323 else if (auto blockType = type->getAs<BlockPointerType>()) 4324 type = blockType->getPointeeType(); 4325 // FIXME: data member pointers? 4326 4327 // Dig out the function prototype, if there is one. 4328 Proto = type->getAs<FunctionProtoType>(); 4329 } 4330 } 4331 4332 // Fill in non-null argument information from the nullability 4333 // information on the parameter types (if we have them). 4334 if (Proto) { 4335 unsigned Index = 0; 4336 for (auto paramType : Proto->getParamTypes()) { 4337 if (isNonNullType(S.Context, paramType)) { 4338 if (NonNullArgs.empty()) 4339 NonNullArgs.resize(Args.size()); 4340 4341 NonNullArgs.set(Index); 4342 } 4343 4344 ++Index; 4345 } 4346 } 4347 } 4348 4349 // Check for non-null arguments. 4350 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4351 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4352 if (NonNullArgs[ArgIndex]) 4353 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4354 } 4355 } 4356 4357 /// Handles the checks for format strings, non-POD arguments to vararg 4358 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4359 /// attributes. 4360 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4361 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4362 bool IsMemberFunction, SourceLocation Loc, 4363 SourceRange Range, VariadicCallType CallType) { 4364 // FIXME: We should check as much as we can in the template definition. 4365 if (CurContext->isDependentContext()) 4366 return; 4367 4368 // Printf and scanf checking. 4369 llvm::SmallBitVector CheckedVarArgs; 4370 if (FDecl) { 4371 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4372 // Only create vector if there are format attributes. 4373 CheckedVarArgs.resize(Args.size()); 4374 4375 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4376 CheckedVarArgs); 4377 } 4378 } 4379 4380 // Refuse POD arguments that weren't caught by the format string 4381 // checks above. 
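  // For example (illustrative only): passing a non-trivial C++ object such as
  // a std::string by value through the ellipsis of a variadic function is
  // diagnosed by checkVariadicArgument below, unless that argument position
  // was already consumed by the format string checks.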
4382 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4383 if (CallType != VariadicDoesNotApply && 4384 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4385 unsigned NumParams = Proto ? Proto->getNumParams() 4386 : FDecl && isa<FunctionDecl>(FDecl) 4387 ? cast<FunctionDecl>(FDecl)->getNumParams() 4388 : FDecl && isa<ObjCMethodDecl>(FDecl) 4389 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4390 : 0; 4391 4392 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4393 // Args[ArgIdx] can be null in malformed code. 4394 if (const Expr *Arg = Args[ArgIdx]) { 4395 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4396 checkVariadicArgument(Arg, CallType); 4397 } 4398 } 4399 } 4400 4401 if (FDecl || Proto) { 4402 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4403 4404 // Type safety checking. 4405 if (FDecl) { 4406 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4407 CheckArgumentWithTypeTag(I, Args, Loc); 4408 } 4409 } 4410 4411 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4412 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4413 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4414 if (!Arg->isValueDependent()) { 4415 Expr::EvalResult Align; 4416 if (Arg->EvaluateAsInt(Align, Context)) { 4417 const llvm::APSInt &I = Align.Val.getInt(); 4418 if (!I.isPowerOf2()) 4419 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4420 << Arg->getSourceRange(); 4421 4422 if (I > Sema::MaximumAlignment) 4423 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4424 << Arg->getSourceRange() << Sema::MaximumAlignment; 4425 } 4426 } 4427 } 4428 4429 if (FD) 4430 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4431 } 4432 4433 /// CheckConstructorCall - Check a constructor call for correctness and safety 4434 /// properties not enforced by the C type system. 4435 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4436 ArrayRef<const Expr *> Args, 4437 const FunctionProtoType *Proto, 4438 SourceLocation Loc) { 4439 VariadicCallType CallType = 4440 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4441 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4442 Loc, SourceRange(), CallType); 4443 } 4444 4445 /// CheckFunctionCall - Check a direct function call for various correctness 4446 /// and safety properties not strictly enforced by the C type system. 4447 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4448 const FunctionProtoType *Proto) { 4449 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4450 isa<CXXMethodDecl>(FDecl); 4451 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4452 IsMemberOperatorCall; 4453 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4454 TheCall->getCallee()); 4455 Expr** Args = TheCall->getArgs(); 4456 unsigned NumArgs = TheCall->getNumArgs(); 4457 4458 Expr *ImplicitThis = nullptr; 4459 if (IsMemberOperatorCall) { 4460 // If this is a call to a member operator, hide the first argument 4461 // from checkCall. 4462 // FIXME: Our choice of AST representation here is less than ideal. 
4463 ImplicitThis = Args[0]; 4464 ++Args; 4465 --NumArgs; 4466 } else if (IsMemberFunction) 4467 ImplicitThis = 4468 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4469 4470 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4471 IsMemberFunction, TheCall->getRParenLoc(), 4472 TheCall->getCallee()->getSourceRange(), CallType); 4473 4474 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4475 // None of the checks below are needed for functions that don't have 4476 // simple names (e.g., C++ conversion functions). 4477 if (!FnInfo) 4478 return false; 4479 4480 CheckAbsoluteValueFunction(TheCall, FDecl); 4481 CheckMaxUnsignedZero(TheCall, FDecl); 4482 4483 if (getLangOpts().ObjC) 4484 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4485 4486 unsigned CMId = FDecl->getMemoryFunctionKind(); 4487 if (CMId == 0) 4488 return false; 4489 4490 // Handle memory setting and copying functions. 4491 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4492 CheckStrlcpycatArguments(TheCall, FnInfo); 4493 else if (CMId == Builtin::BIstrncat) 4494 CheckStrncatArguments(TheCall, FnInfo); 4495 else 4496 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4497 4498 return false; 4499 } 4500 4501 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4502 ArrayRef<const Expr *> Args) { 4503 VariadicCallType CallType = 4504 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4505 4506 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4507 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4508 CallType); 4509 4510 return false; 4511 } 4512 4513 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4514 const FunctionProtoType *Proto) { 4515 QualType Ty; 4516 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4517 Ty = V->getType().getNonReferenceType(); 4518 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4519 Ty = F->getType().getNonReferenceType(); 4520 else 4521 return false; 4522 4523 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4524 !Ty->isFunctionProtoType()) 4525 return false; 4526 4527 VariadicCallType CallType; 4528 if (!Proto || !Proto->isVariadic()) { 4529 CallType = VariadicDoesNotApply; 4530 } else if (Ty->isBlockPointerType()) { 4531 CallType = VariadicBlock; 4532 } else { // Ty->isFunctionPointerType() 4533 CallType = VariadicFunction; 4534 } 4535 4536 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4537 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4538 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4539 TheCall->getCallee()->getSourceRange(), CallType); 4540 4541 return false; 4542 } 4543 4544 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4545 /// such as function pointers returned from functions. 
4546 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4547 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4548 TheCall->getCallee()); 4549 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4550 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4551 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4552 TheCall->getCallee()->getSourceRange(), CallType); 4553 4554 return false; 4555 } 4556 4557 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4558 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4559 return false; 4560 4561 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4562 switch (Op) { 4563 case AtomicExpr::AO__c11_atomic_init: 4564 case AtomicExpr::AO__opencl_atomic_init: 4565 llvm_unreachable("There is no ordering argument for an init"); 4566 4567 case AtomicExpr::AO__c11_atomic_load: 4568 case AtomicExpr::AO__opencl_atomic_load: 4569 case AtomicExpr::AO__atomic_load_n: 4570 case AtomicExpr::AO__atomic_load: 4571 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4572 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4573 4574 case AtomicExpr::AO__c11_atomic_store: 4575 case AtomicExpr::AO__opencl_atomic_store: 4576 case AtomicExpr::AO__atomic_store: 4577 case AtomicExpr::AO__atomic_store_n: 4578 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4579 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4580 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4581 4582 default: 4583 return true; 4584 } 4585 } 4586 4587 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4588 AtomicExpr::AtomicOp Op) { 4589 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4590 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4591 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4592 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4593 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4594 Op); 4595 } 4596 4597 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4598 SourceLocation RParenLoc, MultiExprArg Args, 4599 AtomicExpr::AtomicOp Op, 4600 AtomicArgumentOrder ArgOrder) { 4601 // All the non-OpenCL operations take one of the following forms. 4602 // The OpenCL operations take the __c11 forms with one extra argument for 4603 // synchronization scope. 
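  // For example (illustrative only; these calls are not part of this file):
  //   _Atomic(int) ai; int expected = 0;
  //   __c11_atomic_store(&ai, 1, __ATOMIC_SEQ_CST);             // Copy
  //   int v = __c11_atomic_load(&ai, __ATOMIC_SEQ_CST);         // Load
  //   __c11_atomic_compare_exchange_strong(&ai, &expected, 2,
  //       __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);                  // C11CmpXchg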
4604 enum { 4605 // C __c11_atomic_init(A *, C) 4606 Init, 4607 4608 // C __c11_atomic_load(A *, int) 4609 Load, 4610 4611 // void __atomic_load(A *, CP, int) 4612 LoadCopy, 4613 4614 // void __atomic_store(A *, CP, int) 4615 Copy, 4616 4617 // C __c11_atomic_add(A *, M, int) 4618 Arithmetic, 4619 4620 // C __atomic_exchange_n(A *, CP, int) 4621 Xchg, 4622 4623 // void __atomic_exchange(A *, C *, CP, int) 4624 GNUXchg, 4625 4626 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4627 C11CmpXchg, 4628 4629 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4630 GNUCmpXchg 4631 } Form = Init; 4632 4633 const unsigned NumForm = GNUCmpXchg + 1; 4634 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4635 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4636 // where: 4637 // C is an appropriate type, 4638 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4639 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4640 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4641 // the int parameters are for orderings. 4642 4643 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4644 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4645 "need to update code for modified forms"); 4646 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4647 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 4648 AtomicExpr::AO__atomic_load, 4649 "need to update code for modified C11 atomics"); 4650 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4651 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4652 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4653 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 4654 IsOpenCL; 4655 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4656 Op == AtomicExpr::AO__atomic_store_n || 4657 Op == AtomicExpr::AO__atomic_exchange_n || 4658 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4659 bool IsAddSub = false; 4660 4661 switch (Op) { 4662 case AtomicExpr::AO__c11_atomic_init: 4663 case AtomicExpr::AO__opencl_atomic_init: 4664 Form = Init; 4665 break; 4666 4667 case AtomicExpr::AO__c11_atomic_load: 4668 case AtomicExpr::AO__opencl_atomic_load: 4669 case AtomicExpr::AO__atomic_load_n: 4670 Form = Load; 4671 break; 4672 4673 case AtomicExpr::AO__atomic_load: 4674 Form = LoadCopy; 4675 break; 4676 4677 case AtomicExpr::AO__c11_atomic_store: 4678 case AtomicExpr::AO__opencl_atomic_store: 4679 case AtomicExpr::AO__atomic_store: 4680 case AtomicExpr::AO__atomic_store_n: 4681 Form = Copy; 4682 break; 4683 4684 case AtomicExpr::AO__c11_atomic_fetch_add: 4685 case AtomicExpr::AO__c11_atomic_fetch_sub: 4686 case AtomicExpr::AO__opencl_atomic_fetch_add: 4687 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4688 case AtomicExpr::AO__atomic_fetch_add: 4689 case AtomicExpr::AO__atomic_fetch_sub: 4690 case AtomicExpr::AO__atomic_add_fetch: 4691 case AtomicExpr::AO__atomic_sub_fetch: 4692 IsAddSub = true; 4693 LLVM_FALLTHROUGH; 4694 case AtomicExpr::AO__c11_atomic_fetch_and: 4695 case AtomicExpr::AO__c11_atomic_fetch_or: 4696 case AtomicExpr::AO__c11_atomic_fetch_xor: 4697 case AtomicExpr::AO__opencl_atomic_fetch_and: 4698 case AtomicExpr::AO__opencl_atomic_fetch_or: 4699 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4700 case AtomicExpr::AO__atomic_fetch_and: 4701 case AtomicExpr::AO__atomic_fetch_or: 4702 case AtomicExpr::AO__atomic_fetch_xor: 4703 case AtomicExpr::AO__atomic_fetch_nand: 4704 case AtomicExpr::AO__atomic_and_fetch: 4705 case AtomicExpr::AO__atomic_or_fetch: 4706 
case AtomicExpr::AO__atomic_xor_fetch: 4707 case AtomicExpr::AO__atomic_nand_fetch: 4708 case AtomicExpr::AO__c11_atomic_fetch_min: 4709 case AtomicExpr::AO__c11_atomic_fetch_max: 4710 case AtomicExpr::AO__opencl_atomic_fetch_min: 4711 case AtomicExpr::AO__opencl_atomic_fetch_max: 4712 case AtomicExpr::AO__atomic_min_fetch: 4713 case AtomicExpr::AO__atomic_max_fetch: 4714 case AtomicExpr::AO__atomic_fetch_min: 4715 case AtomicExpr::AO__atomic_fetch_max: 4716 Form = Arithmetic; 4717 break; 4718 4719 case AtomicExpr::AO__c11_atomic_exchange: 4720 case AtomicExpr::AO__opencl_atomic_exchange: 4721 case AtomicExpr::AO__atomic_exchange_n: 4722 Form = Xchg; 4723 break; 4724 4725 case AtomicExpr::AO__atomic_exchange: 4726 Form = GNUXchg; 4727 break; 4728 4729 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4730 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4731 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4732 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4733 Form = C11CmpXchg; 4734 break; 4735 4736 case AtomicExpr::AO__atomic_compare_exchange: 4737 case AtomicExpr::AO__atomic_compare_exchange_n: 4738 Form = GNUCmpXchg; 4739 break; 4740 } 4741 4742 unsigned AdjustedNumArgs = NumArgs[Form]; 4743 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4744 ++AdjustedNumArgs; 4745 // Check we have the right number of arguments. 4746 if (Args.size() < AdjustedNumArgs) { 4747 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 4748 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4749 << ExprRange; 4750 return ExprError(); 4751 } else if (Args.size() > AdjustedNumArgs) { 4752 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 4753 diag::err_typecheck_call_too_many_args) 4754 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4755 << ExprRange; 4756 return ExprError(); 4757 } 4758 4759 // Inspect the first argument of the atomic operation. 4760 Expr *Ptr = Args[0]; 4761 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4762 if (ConvertedPtr.isInvalid()) 4763 return ExprError(); 4764 4765 Ptr = ConvertedPtr.get(); 4766 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4767 if (!pointerType) { 4768 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 4769 << Ptr->getType() << Ptr->getSourceRange(); 4770 return ExprError(); 4771 } 4772 4773 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4774 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4775 QualType ValType = AtomTy; // 'C' 4776 if (IsC11) { 4777 if (!AtomTy->isAtomicType()) { 4778 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 4779 << Ptr->getType() << Ptr->getSourceRange(); 4780 return ExprError(); 4781 } 4782 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4783 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4784 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 4785 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4786 << Ptr->getSourceRange(); 4787 return ExprError(); 4788 } 4789 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 4790 } else if (Form != Load && Form != LoadCopy) { 4791 if (ValType.isConstQualified()) { 4792 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 4793 << Ptr->getType() << Ptr->getSourceRange(); 4794 return ExprError(); 4795 } 4796 } 4797 4798 // For an arithmetic operation, the implied arithmetic must be well-formed. 
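  // For example (illustrative): __c11_atomic_fetch_add is accepted on
  // _Atomic(int) and _Atomic(int *), while bitwise forms such as
  // __c11_atomic_fetch_and are rejected below unless the value type is an
  // integer.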
4799 if (Form == Arithmetic) { 4800 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4801 if (IsAddSub && !ValType->isIntegerType() 4802 && !ValType->isPointerType()) { 4803 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4804 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4805 return ExprError(); 4806 } 4807 if (!IsAddSub && !ValType->isIntegerType()) { 4808 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 4809 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4810 return ExprError(); 4811 } 4812 if (IsC11 && ValType->isPointerType() && 4813 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4814 diag::err_incomplete_type)) { 4815 return ExprError(); 4816 } 4817 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4818 // For __atomic_*_n operations, the value type must be a scalar integral or 4819 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4820 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4821 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4822 return ExprError(); 4823 } 4824 4825 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4826 !AtomTy->isScalarType()) { 4827 // For GNU atomics, require a trivially-copyable type. This is not part of 4828 // the GNU atomics specification, but we enforce it for sanity. 4829 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 4830 << Ptr->getType() << Ptr->getSourceRange(); 4831 return ExprError(); 4832 } 4833 4834 switch (ValType.getObjCLifetime()) { 4835 case Qualifiers::OCL_None: 4836 case Qualifiers::OCL_ExplicitNone: 4837 // okay 4838 break; 4839 4840 case Qualifiers::OCL_Weak: 4841 case Qualifiers::OCL_Strong: 4842 case Qualifiers::OCL_Autoreleasing: 4843 // FIXME: Can this happen? By this point, ValType should be known 4844 // to be trivially copyable. 4845 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 4846 << ValType << Ptr->getSourceRange(); 4847 return ExprError(); 4848 } 4849 4850 // All atomic operations have an overload which takes a pointer to a volatile 4851 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4852 // into the result or the other operands. Similarly atomic_load takes a 4853 // pointer to a const 'A'. 4854 ValType.removeLocalVolatile(); 4855 ValType.removeLocalConst(); 4856 QualType ResultType = ValType; 4857 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4858 Form == Init) 4859 ResultType = Context.VoidTy; 4860 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4861 ResultType = Context.BoolTy; 4862 4863 // The type of a parameter passed 'by value'. In the GNU atomics, such 4864 // arguments are actually passed as pointers. 
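  // For example (illustrative): __atomic_store(&obj, &val, order) receives
  // its value operand through a pointer (so 'CP' is 'C *'), whereas
  // __c11_atomic_store(&obj, val, order) and __atomic_store_n(&obj, val,
  // order) take the value itself.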
4865 QualType ByValType = ValType; // 'CP' 4866 bool IsPassedByAddress = false; 4867 if (!IsC11 && !IsN) { 4868 ByValType = Ptr->getType(); 4869 IsPassedByAddress = true; 4870 } 4871 4872 SmallVector<Expr *, 5> APIOrderedArgs; 4873 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 4874 APIOrderedArgs.push_back(Args[0]); 4875 switch (Form) { 4876 case Init: 4877 case Load: 4878 APIOrderedArgs.push_back(Args[1]); // Val1/Order 4879 break; 4880 case LoadCopy: 4881 case Copy: 4882 case Arithmetic: 4883 case Xchg: 4884 APIOrderedArgs.push_back(Args[2]); // Val1 4885 APIOrderedArgs.push_back(Args[1]); // Order 4886 break; 4887 case GNUXchg: 4888 APIOrderedArgs.push_back(Args[2]); // Val1 4889 APIOrderedArgs.push_back(Args[3]); // Val2 4890 APIOrderedArgs.push_back(Args[1]); // Order 4891 break; 4892 case C11CmpXchg: 4893 APIOrderedArgs.push_back(Args[2]); // Val1 4894 APIOrderedArgs.push_back(Args[4]); // Val2 4895 APIOrderedArgs.push_back(Args[1]); // Order 4896 APIOrderedArgs.push_back(Args[3]); // OrderFail 4897 break; 4898 case GNUCmpXchg: 4899 APIOrderedArgs.push_back(Args[2]); // Val1 4900 APIOrderedArgs.push_back(Args[4]); // Val2 4901 APIOrderedArgs.push_back(Args[5]); // Weak 4902 APIOrderedArgs.push_back(Args[1]); // Order 4903 APIOrderedArgs.push_back(Args[3]); // OrderFail 4904 break; 4905 } 4906 } else 4907 APIOrderedArgs.append(Args.begin(), Args.end()); 4908 4909 // The first argument's non-CV pointer type is used to deduce the type of 4910 // subsequent arguments, except for: 4911 // - weak flag (always converted to bool) 4912 // - memory order (always converted to int) 4913 // - scope (always converted to int) 4914 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 4915 QualType Ty; 4916 if (i < NumVals[Form] + 1) { 4917 switch (i) { 4918 case 0: 4919 // The first argument is always a pointer. It has a fixed type. 4920 // It is always dereferenced, a nullptr is undefined. 4921 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4922 // Nothing else to do: we already know all we want about this pointer. 4923 continue; 4924 case 1: 4925 // The second argument is the non-atomic operand. For arithmetic, this 4926 // is always passed by value, and for a compare_exchange it is always 4927 // passed by address. For the rest, GNU uses by-address and C11 uses 4928 // by-value. 4929 assert(Form != Load); 4930 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4931 Ty = ValType; 4932 else if (Form == Copy || Form == Xchg) { 4933 if (IsPassedByAddress) { 4934 // The value pointer is always dereferenced, a nullptr is undefined. 4935 CheckNonNullArgument(*this, APIOrderedArgs[i], 4936 ExprRange.getBegin()); 4937 } 4938 Ty = ByValType; 4939 } else if (Form == Arithmetic) 4940 Ty = Context.getPointerDiffType(); 4941 else { 4942 Expr *ValArg = APIOrderedArgs[i]; 4943 // The value pointer is always dereferenced, a nullptr is undefined. 4944 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 4945 LangAS AS = LangAS::Default; 4946 // Keep address space of non-atomic pointer type. 4947 if (const PointerType *PtrTy = 4948 ValArg->getType()->getAs<PointerType>()) { 4949 AS = PtrTy->getPointeeType().getAddressSpace(); 4950 } 4951 Ty = Context.getPointerType( 4952 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4953 } 4954 break; 4955 case 2: 4956 // The third argument to compare_exchange / GNU exchange is the desired 4957 // value, either by-value (for the C11 and *_n variant) or as a pointer. 
4958 if (IsPassedByAddress) 4959 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4960 Ty = ByValType; 4961 break; 4962 case 3: 4963 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4964 Ty = Context.BoolTy; 4965 break; 4966 } 4967 } else { 4968 // The order(s) and scope are always converted to int. 4969 Ty = Context.IntTy; 4970 } 4971 4972 InitializedEntity Entity = 4973 InitializedEntity::InitializeParameter(Context, Ty, false); 4974 ExprResult Arg = APIOrderedArgs[i]; 4975 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4976 if (Arg.isInvalid()) 4977 return true; 4978 APIOrderedArgs[i] = Arg.get(); 4979 } 4980 4981 // Permute the arguments into a 'consistent' order. 4982 SmallVector<Expr*, 5> SubExprs; 4983 SubExprs.push_back(Ptr); 4984 switch (Form) { 4985 case Init: 4986 // Note, AtomicExpr::getVal1() has a special case for this atomic. 4987 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4988 break; 4989 case Load: 4990 SubExprs.push_back(APIOrderedArgs[1]); // Order 4991 break; 4992 case LoadCopy: 4993 case Copy: 4994 case Arithmetic: 4995 case Xchg: 4996 SubExprs.push_back(APIOrderedArgs[2]); // Order 4997 SubExprs.push_back(APIOrderedArgs[1]); // Val1 4998 break; 4999 case GNUXchg: 5000 // Note, AtomicExpr::getVal2() has a special case for this atomic. 5001 SubExprs.push_back(APIOrderedArgs[3]); // Order 5002 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5003 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5004 break; 5005 case C11CmpXchg: 5006 SubExprs.push_back(APIOrderedArgs[3]); // Order 5007 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5008 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5009 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5010 break; 5011 case GNUCmpXchg: 5012 SubExprs.push_back(APIOrderedArgs[4]); // Order 5013 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5014 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5015 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5016 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5017 break; 5018 } 5019 5020 if (SubExprs.size() >= 2 && Form != Init) { 5021 if (Optional<llvm::APSInt> Result = 5022 SubExprs[1]->getIntegerConstantExpr(Context)) 5023 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5024 Diag(SubExprs[1]->getBeginLoc(), 5025 diag::warn_atomic_op_has_invalid_memory_order) 5026 << SubExprs[1]->getSourceRange(); 5027 } 5028 5029 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5030 auto *Scope = Args[Args.size() - 1]; 5031 if (Optional<llvm::APSInt> Result = 5032 Scope->getIntegerConstantExpr(Context)) { 5033 if (!ScopeModel->isValid(Result->getZExtValue())) 5034 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5035 << Scope->getSourceRange(); 5036 } 5037 SubExprs.push_back(Scope); 5038 } 5039 5040 AtomicExpr *AE = new (Context) 5041 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5042 5043 if ((Op == AtomicExpr::AO__c11_atomic_load || 5044 Op == AtomicExpr::AO__c11_atomic_store || 5045 Op == AtomicExpr::AO__opencl_atomic_load || 5046 Op == AtomicExpr::AO__opencl_atomic_store ) && 5047 Context.AtomicUsesUnsupportedLibcall(AE)) 5048 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5049 << ((Op == AtomicExpr::AO__c11_atomic_load || 5050 Op == AtomicExpr::AO__opencl_atomic_load) 5051 ? 
0 5052 : 1); 5053 5054 if (ValType->isExtIntType()) { 5055 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5056 return ExprError(); 5057 } 5058 5059 return AE; 5060 } 5061 5062 /// checkBuiltinArgument - Given a call to a builtin function, perform 5063 /// normal type-checking on the given argument, updating the call in 5064 /// place. This is useful when a builtin function requires custom 5065 /// type-checking for some of its arguments but not necessarily all of 5066 /// them. 5067 /// 5068 /// Returns true on error. 5069 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5070 FunctionDecl *Fn = E->getDirectCallee(); 5071 assert(Fn && "builtin call without direct callee!"); 5072 5073 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5074 InitializedEntity Entity = 5075 InitializedEntity::InitializeParameter(S.Context, Param); 5076 5077 ExprResult Arg = E->getArg(0); 5078 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5079 if (Arg.isInvalid()) 5080 return true; 5081 5082 E->setArg(ArgIndex, Arg.get()); 5083 return false; 5084 } 5085 5086 /// We have a call to a function like __sync_fetch_and_add, which is an 5087 /// overloaded function based on the pointer type of its first argument. 5088 /// The main BuildCallExpr routines have already promoted the types of 5089 /// arguments because all of these calls are prototyped as void(...). 5090 /// 5091 /// This function goes through and does final semantic checking for these 5092 /// builtins, as well as generating any warnings. 5093 ExprResult 5094 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5095 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5096 Expr *Callee = TheCall->getCallee(); 5097 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5098 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5099 5100 // Ensure that we have at least one argument to do type inference from. 5101 if (TheCall->getNumArgs() < 1) { 5102 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5103 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5104 return ExprError(); 5105 } 5106 5107 // Inspect the first argument of the atomic builtin. This should always be 5108 // a pointer type, whose element is an integral scalar or pointer type. 5109 // Because it is a pointer type, we don't have to worry about any implicit 5110 // casts here. 5111 // FIXME: We don't allow floating point scalars as input. 
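  // For example (illustrative, assuming an LP64 target): given 'long *p', a
  // call such as
  //   __sync_fetch_and_add(p, 1)
  // is mapped by the checks below to the concrete builtin
  // __sync_fetch_and_add_8, and the literal 1 is converted to 'long'.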
5112 Expr *FirstArg = TheCall->getArg(0); 5113 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5114 if (FirstArgResult.isInvalid()) 5115 return ExprError(); 5116 FirstArg = FirstArgResult.get(); 5117 TheCall->setArg(0, FirstArg); 5118 5119 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5120 if (!pointerType) { 5121 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5122 << FirstArg->getType() << FirstArg->getSourceRange(); 5123 return ExprError(); 5124 } 5125 5126 QualType ValType = pointerType->getPointeeType(); 5127 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5128 !ValType->isBlockPointerType()) { 5129 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5130 << FirstArg->getType() << FirstArg->getSourceRange(); 5131 return ExprError(); 5132 } 5133 5134 if (ValType.isConstQualified()) { 5135 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5136 << FirstArg->getType() << FirstArg->getSourceRange(); 5137 return ExprError(); 5138 } 5139 5140 switch (ValType.getObjCLifetime()) { 5141 case Qualifiers::OCL_None: 5142 case Qualifiers::OCL_ExplicitNone: 5143 // okay 5144 break; 5145 5146 case Qualifiers::OCL_Weak: 5147 case Qualifiers::OCL_Strong: 5148 case Qualifiers::OCL_Autoreleasing: 5149 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5150 << ValType << FirstArg->getSourceRange(); 5151 return ExprError(); 5152 } 5153 5154 // Strip any qualifiers off ValType. 5155 ValType = ValType.getUnqualifiedType(); 5156 5157 // The majority of builtins return a value, but a few have special return 5158 // types, so allow them to override appropriately below. 5159 QualType ResultType = ValType; 5160 5161 // We need to figure out which concrete builtin this maps onto. For example, 5162 // __sync_fetch_and_add with a 2 byte object turns into 5163 // __sync_fetch_and_add_2. 5164 #define BUILTIN_ROW(x) \ 5165 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5166 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5167 5168 static const unsigned BuiltinIndices[][5] = { 5169 BUILTIN_ROW(__sync_fetch_and_add), 5170 BUILTIN_ROW(__sync_fetch_and_sub), 5171 BUILTIN_ROW(__sync_fetch_and_or), 5172 BUILTIN_ROW(__sync_fetch_and_and), 5173 BUILTIN_ROW(__sync_fetch_and_xor), 5174 BUILTIN_ROW(__sync_fetch_and_nand), 5175 5176 BUILTIN_ROW(__sync_add_and_fetch), 5177 BUILTIN_ROW(__sync_sub_and_fetch), 5178 BUILTIN_ROW(__sync_and_and_fetch), 5179 BUILTIN_ROW(__sync_or_and_fetch), 5180 BUILTIN_ROW(__sync_xor_and_fetch), 5181 BUILTIN_ROW(__sync_nand_and_fetch), 5182 5183 BUILTIN_ROW(__sync_val_compare_and_swap), 5184 BUILTIN_ROW(__sync_bool_compare_and_swap), 5185 BUILTIN_ROW(__sync_lock_test_and_set), 5186 BUILTIN_ROW(__sync_lock_release), 5187 BUILTIN_ROW(__sync_swap) 5188 }; 5189 #undef BUILTIN_ROW 5190 5191 // Determine the index of the size. 5192 unsigned SizeIndex; 5193 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5194 case 1: SizeIndex = 0; break; 5195 case 2: SizeIndex = 1; break; 5196 case 4: SizeIndex = 2; break; 5197 case 8: SizeIndex = 3; break; 5198 case 16: SizeIndex = 4; break; 5199 default: 5200 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5201 << FirstArg->getType() << FirstArg->getSourceRange(); 5202 return ExprError(); 5203 } 5204 5205 // Each of these builtins has one pointer argument, followed by some number of 5206 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5207 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 5208 // as the number of fixed args. 5209 unsigned BuiltinID = FDecl->getBuiltinID(); 5210 unsigned BuiltinIndex, NumFixed = 1; 5211 bool WarnAboutSemanticsChange = false; 5212 switch (BuiltinID) { 5213 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5214 case Builtin::BI__sync_fetch_and_add: 5215 case Builtin::BI__sync_fetch_and_add_1: 5216 case Builtin::BI__sync_fetch_and_add_2: 5217 case Builtin::BI__sync_fetch_and_add_4: 5218 case Builtin::BI__sync_fetch_and_add_8: 5219 case Builtin::BI__sync_fetch_and_add_16: 5220 BuiltinIndex = 0; 5221 break; 5222 5223 case Builtin::BI__sync_fetch_and_sub: 5224 case Builtin::BI__sync_fetch_and_sub_1: 5225 case Builtin::BI__sync_fetch_and_sub_2: 5226 case Builtin::BI__sync_fetch_and_sub_4: 5227 case Builtin::BI__sync_fetch_and_sub_8: 5228 case Builtin::BI__sync_fetch_and_sub_16: 5229 BuiltinIndex = 1; 5230 break; 5231 5232 case Builtin::BI__sync_fetch_and_or: 5233 case Builtin::BI__sync_fetch_and_or_1: 5234 case Builtin::BI__sync_fetch_and_or_2: 5235 case Builtin::BI__sync_fetch_and_or_4: 5236 case Builtin::BI__sync_fetch_and_or_8: 5237 case Builtin::BI__sync_fetch_and_or_16: 5238 BuiltinIndex = 2; 5239 break; 5240 5241 case Builtin::BI__sync_fetch_and_and: 5242 case Builtin::BI__sync_fetch_and_and_1: 5243 case Builtin::BI__sync_fetch_and_and_2: 5244 case Builtin::BI__sync_fetch_and_and_4: 5245 case Builtin::BI__sync_fetch_and_and_8: 5246 case Builtin::BI__sync_fetch_and_and_16: 5247 BuiltinIndex = 3; 5248 break; 5249 5250 case Builtin::BI__sync_fetch_and_xor: 5251 case Builtin::BI__sync_fetch_and_xor_1: 5252 case Builtin::BI__sync_fetch_and_xor_2: 5253 case Builtin::BI__sync_fetch_and_xor_4: 5254 case Builtin::BI__sync_fetch_and_xor_8: 5255 case Builtin::BI__sync_fetch_and_xor_16: 5256 BuiltinIndex = 4; 5257 break; 5258 5259 case Builtin::BI__sync_fetch_and_nand: 5260 case Builtin::BI__sync_fetch_and_nand_1: 5261 case Builtin::BI__sync_fetch_and_nand_2: 5262 case Builtin::BI__sync_fetch_and_nand_4: 5263 case Builtin::BI__sync_fetch_and_nand_8: 5264 case Builtin::BI__sync_fetch_and_nand_16: 5265 BuiltinIndex = 5; 5266 WarnAboutSemanticsChange = true; 5267 break; 5268 5269 case Builtin::BI__sync_add_and_fetch: 5270 case Builtin::BI__sync_add_and_fetch_1: 5271 case Builtin::BI__sync_add_and_fetch_2: 5272 case Builtin::BI__sync_add_and_fetch_4: 5273 case Builtin::BI__sync_add_and_fetch_8: 5274 case Builtin::BI__sync_add_and_fetch_16: 5275 BuiltinIndex = 6; 5276 break; 5277 5278 case Builtin::BI__sync_sub_and_fetch: 5279 case Builtin::BI__sync_sub_and_fetch_1: 5280 case Builtin::BI__sync_sub_and_fetch_2: 5281 case Builtin::BI__sync_sub_and_fetch_4: 5282 case Builtin::BI__sync_sub_and_fetch_8: 5283 case Builtin::BI__sync_sub_and_fetch_16: 5284 BuiltinIndex = 7; 5285 break; 5286 5287 case Builtin::BI__sync_and_and_fetch: 5288 case Builtin::BI__sync_and_and_fetch_1: 5289 case Builtin::BI__sync_and_and_fetch_2: 5290 case Builtin::BI__sync_and_and_fetch_4: 5291 case Builtin::BI__sync_and_and_fetch_8: 5292 case Builtin::BI__sync_and_and_fetch_16: 5293 BuiltinIndex = 8; 5294 break; 5295 5296 case Builtin::BI__sync_or_and_fetch: 5297 case Builtin::BI__sync_or_and_fetch_1: 5298 case Builtin::BI__sync_or_and_fetch_2: 5299 case Builtin::BI__sync_or_and_fetch_4: 5300 case Builtin::BI__sync_or_and_fetch_8: 5301 case Builtin::BI__sync_or_and_fetch_16: 5302 BuiltinIndex = 9; 5303 break; 5304 5305 case Builtin::BI__sync_xor_and_fetch: 5306 case Builtin::BI__sync_xor_and_fetch_1: 5307 case 
Builtin::BI__sync_xor_and_fetch_2: 5308 case Builtin::BI__sync_xor_and_fetch_4: 5309 case Builtin::BI__sync_xor_and_fetch_8: 5310 case Builtin::BI__sync_xor_and_fetch_16: 5311 BuiltinIndex = 10; 5312 break; 5313 5314 case Builtin::BI__sync_nand_and_fetch: 5315 case Builtin::BI__sync_nand_and_fetch_1: 5316 case Builtin::BI__sync_nand_and_fetch_2: 5317 case Builtin::BI__sync_nand_and_fetch_4: 5318 case Builtin::BI__sync_nand_and_fetch_8: 5319 case Builtin::BI__sync_nand_and_fetch_16: 5320 BuiltinIndex = 11; 5321 WarnAboutSemanticsChange = true; 5322 break; 5323 5324 case Builtin::BI__sync_val_compare_and_swap: 5325 case Builtin::BI__sync_val_compare_and_swap_1: 5326 case Builtin::BI__sync_val_compare_and_swap_2: 5327 case Builtin::BI__sync_val_compare_and_swap_4: 5328 case Builtin::BI__sync_val_compare_and_swap_8: 5329 case Builtin::BI__sync_val_compare_and_swap_16: 5330 BuiltinIndex = 12; 5331 NumFixed = 2; 5332 break; 5333 5334 case Builtin::BI__sync_bool_compare_and_swap: 5335 case Builtin::BI__sync_bool_compare_and_swap_1: 5336 case Builtin::BI__sync_bool_compare_and_swap_2: 5337 case Builtin::BI__sync_bool_compare_and_swap_4: 5338 case Builtin::BI__sync_bool_compare_and_swap_8: 5339 case Builtin::BI__sync_bool_compare_and_swap_16: 5340 BuiltinIndex = 13; 5341 NumFixed = 2; 5342 ResultType = Context.BoolTy; 5343 break; 5344 5345 case Builtin::BI__sync_lock_test_and_set: 5346 case Builtin::BI__sync_lock_test_and_set_1: 5347 case Builtin::BI__sync_lock_test_and_set_2: 5348 case Builtin::BI__sync_lock_test_and_set_4: 5349 case Builtin::BI__sync_lock_test_and_set_8: 5350 case Builtin::BI__sync_lock_test_and_set_16: 5351 BuiltinIndex = 14; 5352 break; 5353 5354 case Builtin::BI__sync_lock_release: 5355 case Builtin::BI__sync_lock_release_1: 5356 case Builtin::BI__sync_lock_release_2: 5357 case Builtin::BI__sync_lock_release_4: 5358 case Builtin::BI__sync_lock_release_8: 5359 case Builtin::BI__sync_lock_release_16: 5360 BuiltinIndex = 15; 5361 NumFixed = 0; 5362 ResultType = Context.VoidTy; 5363 break; 5364 5365 case Builtin::BI__sync_swap: 5366 case Builtin::BI__sync_swap_1: 5367 case Builtin::BI__sync_swap_2: 5368 case Builtin::BI__sync_swap_4: 5369 case Builtin::BI__sync_swap_8: 5370 case Builtin::BI__sync_swap_16: 5371 BuiltinIndex = 16; 5372 break; 5373 } 5374 5375 // Now that we know how many fixed arguments we expect, first check that we 5376 // have at least that many. 5377 if (TheCall->getNumArgs() < 1+NumFixed) { 5378 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5379 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5380 << Callee->getSourceRange(); 5381 return ExprError(); 5382 } 5383 5384 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5385 << Callee->getSourceRange(); 5386 5387 if (WarnAboutSemanticsChange) { 5388 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5389 << Callee->getSourceRange(); 5390 } 5391 5392 // Get the decl for the concrete builtin from this, we can tell what the 5393 // concrete integer type we should convert to is. 5394 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5395 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5396 FunctionDecl *NewBuiltinDecl; 5397 if (NewBuiltinID == BuiltinID) 5398 NewBuiltinDecl = FDecl; 5399 else { 5400 // Perform builtin lookup to avoid redeclaring it. 
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly.  Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType. This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type.
    // Check to see if there is a potentially weird extension going on here.
    // This can happen when you do an atomic operation on something like a
    // char* and pass in 42. The 42 gets converted to char. This is even more
    // strange for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit use of _ExtInt with atomic builtins.
  // The arguments would have already been converted to the first argument's
  // type, so we only need to check the first argument.
  const auto *ExtIntValType = ValType->getAs<ExtIntType>();
  if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
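///
/// For example (illustrative only):
///   int *p; int v;
///   __builtin_nontemporal_store(1, p);   // access type deduced from 'p'
///   v = __builtin_nontemporal_load(p);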
5470 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5471 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5472 DeclRefExpr *DRE = 5473 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5474 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5475 unsigned BuiltinID = FDecl->getBuiltinID(); 5476 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5477 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5478 "Unexpected nontemporal load/store builtin!"); 5479 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5480 unsigned numArgs = isStore ? 2 : 1; 5481 5482 // Ensure that we have the proper number of arguments. 5483 if (checkArgCount(*this, TheCall, numArgs)) 5484 return ExprError(); 5485 5486 // Inspect the last argument of the nontemporal builtin. This should always 5487 // be a pointer type, from which we imply the type of the memory access. 5488 // Because it is a pointer type, we don't have to worry about any implicit 5489 // casts here. 5490 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5491 ExprResult PointerArgResult = 5492 DefaultFunctionArrayLvalueConversion(PointerArg); 5493 5494 if (PointerArgResult.isInvalid()) 5495 return ExprError(); 5496 PointerArg = PointerArgResult.get(); 5497 TheCall->setArg(numArgs - 1, PointerArg); 5498 5499 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5500 if (!pointerType) { 5501 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5502 << PointerArg->getType() << PointerArg->getSourceRange(); 5503 return ExprError(); 5504 } 5505 5506 QualType ValType = pointerType->getPointeeType(); 5507 5508 // Strip any qualifiers off ValType. 5509 ValType = ValType.getUnqualifiedType(); 5510 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5511 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5512 !ValType->isVectorType()) { 5513 Diag(DRE->getBeginLoc(), 5514 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5515 << PointerArg->getType() << PointerArg->getSourceRange(); 5516 return ExprError(); 5517 } 5518 5519 if (!isStore) { 5520 TheCall->setType(ValType); 5521 return TheCallResult; 5522 } 5523 5524 ExprResult ValArg = TheCall->getArg(0); 5525 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5526 Context, ValType, /*consume*/ false); 5527 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5528 if (ValArg.isInvalid()) 5529 return ExprError(); 5530 5531 TheCall->setArg(0, ValArg.get()); 5532 TheCall->setType(Context.VoidTy); 5533 return TheCallResult; 5534 } 5535 5536 /// CheckObjCString - Checks that the argument to the builtin 5537 /// CFString constructor is correct 5538 /// Note: It might also make sense to do the UTF-16 conversion here (would 5539 /// simplify the backend). 
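/// For example (illustrative): __builtin___CFStringMakeConstantString("hi")
/// is accepted; a non-literal or wide-string argument is rejected, and a
/// literal whose non-ASCII bytes fail UTF-8 to UTF-16 conversion only draws
/// a truncation warning.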
5540 bool Sema::CheckObjCString(Expr *Arg) { 5541 Arg = Arg->IgnoreParenCasts(); 5542 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 5543 5544 if (!Literal || !Literal->isAscii()) { 5545 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 5546 << Arg->getSourceRange(); 5547 return true; 5548 } 5549 5550 if (Literal->containsNonAsciiOrNull()) { 5551 StringRef String = Literal->getString(); 5552 unsigned NumBytes = String.size(); 5553 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 5554 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 5555 llvm::UTF16 *ToPtr = &ToBuf[0]; 5556 5557 llvm::ConversionResult Result = 5558 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 5559 ToPtr + NumBytes, llvm::strictConversion); 5560 // Check for conversion failure. 5561 if (Result != llvm::conversionOK) 5562 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 5563 << Arg->getSourceRange(); 5564 } 5565 return false; 5566 } 5567 5568 /// CheckObjCString - Checks that the format string argument to the os_log() 5569 /// and os_trace() functions is correct, and converts it to const char *. 5570 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 5571 Arg = Arg->IgnoreParenCasts(); 5572 auto *Literal = dyn_cast<StringLiteral>(Arg); 5573 if (!Literal) { 5574 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 5575 Literal = ObjcLiteral->getString(); 5576 } 5577 } 5578 5579 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 5580 return ExprError( 5581 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 5582 << Arg->getSourceRange()); 5583 } 5584 5585 ExprResult Result(Literal); 5586 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 5587 InitializedEntity Entity = 5588 InitializedEntity::InitializeParameter(Context, ResultTy, false); 5589 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 5590 return Result; 5591 } 5592 5593 /// Check that the user is calling the appropriate va_start builtin for the 5594 /// target and calling convention. 5595 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 5596 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 5597 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 5598 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 5599 TT.getArch() == llvm::Triple::aarch64_32); 5600 bool IsWindows = TT.isOSWindows(); 5601 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 5602 if (IsX64 || IsAArch64) { 5603 CallingConv CC = CC_C; 5604 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 5605 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 5606 if (IsMSVAStart) { 5607 // Don't allow this in System V ABI functions. 5608 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 5609 return S.Diag(Fn->getBeginLoc(), 5610 diag::err_ms_va_start_used_in_sysv_function); 5611 } else { 5612 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 5613 // On x64 Windows, don't allow this in System V ABI functions. 5614 // (Yes, that means there's no corresponding way to support variadic 5615 // System V ABI functions on Windows.) 
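      // For example (illustrative, assuming x86-64 Linux): inside a function
      // declared __attribute__((ms_abi)), __builtin_va_start is rejected by
      // the check that follows, and __builtin_ms_va_start must be used
      // instead.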
5616 if ((IsWindows && CC == CC_X86_64SysV) || 5617 (!IsWindows && CC == CC_Win64)) 5618 return S.Diag(Fn->getBeginLoc(), 5619 diag::err_va_start_used_in_wrong_abi_function) 5620 << !IsWindows; 5621 } 5622 return false; 5623 } 5624 5625 if (IsMSVAStart) 5626 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5627 return false; 5628 } 5629 5630 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5631 ParmVarDecl **LastParam = nullptr) { 5632 // Determine whether the current function, block, or obj-c method is variadic 5633 // and get its parameter list. 5634 bool IsVariadic = false; 5635 ArrayRef<ParmVarDecl *> Params; 5636 DeclContext *Caller = S.CurContext; 5637 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5638 IsVariadic = Block->isVariadic(); 5639 Params = Block->parameters(); 5640 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5641 IsVariadic = FD->isVariadic(); 5642 Params = FD->parameters(); 5643 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5644 IsVariadic = MD->isVariadic(); 5645 // FIXME: This isn't correct for methods (results in bogus warning). 5646 Params = MD->parameters(); 5647 } else if (isa<CapturedDecl>(Caller)) { 5648 // We don't support va_start in a CapturedDecl. 5649 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5650 return true; 5651 } else { 5652 // This must be some other declcontext that parses exprs. 5653 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5654 return true; 5655 } 5656 5657 if (!IsVariadic) { 5658 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5659 return true; 5660 } 5661 5662 if (LastParam) 5663 *LastParam = Params.empty() ? nullptr : Params.back(); 5664 5665 return false; 5666 } 5667 5668 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5669 /// for validity. Emit an error and return true on failure; return false 5670 /// on success. 5671 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5672 Expr *Fn = TheCall->getCallee(); 5673 5674 if (checkVAStartABI(*this, BuiltinID, Fn)) 5675 return true; 5676 5677 if (checkArgCount(*this, TheCall, 2)) 5678 return true; 5679 5680 // Type-check the first argument normally. 5681 if (checkBuiltinArgument(*this, TheCall, 0)) 5682 return true; 5683 5684 // Check that the current function is variadic, and get its last parameter. 5685 ParmVarDecl *LastParam; 5686 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5687 return true; 5688 5689 // Verify that the second argument to the builtin is the last argument of the 5690 // current function or method. 5691 bool SecondArgIsLastNamedArgument = false; 5692 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5693 5694 // These are valid if SecondArgIsLastNamedArgument is false after the next 5695 // block. 
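  // For example (illustrative): in 'void f(int n, ...)',
  // '__builtin_va_start(ap, n)' names the last parameter and is accepted,
  // while naming any other declaration triggers the warning emitted below.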
5696 QualType Type; 5697 SourceLocation ParamLoc; 5698 bool IsCRegister = false; 5699 5700 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5701 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5702 SecondArgIsLastNamedArgument = PV == LastParam; 5703 5704 Type = PV->getType(); 5705 ParamLoc = PV->getLocation(); 5706 IsCRegister = 5707 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5708 } 5709 } 5710 5711 if (!SecondArgIsLastNamedArgument) 5712 Diag(TheCall->getArg(1)->getBeginLoc(), 5713 diag::warn_second_arg_of_va_start_not_last_named_param); 5714 else if (IsCRegister || Type->isReferenceType() || 5715 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5716 // Promotable integers are UB, but enumerations need a bit of 5717 // extra checking to see what their promotable type actually is. 5718 if (!Type->isPromotableIntegerType()) 5719 return false; 5720 if (!Type->isEnumeralType()) 5721 return true; 5722 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 5723 return !(ED && 5724 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5725 }()) { 5726 unsigned Reason = 0; 5727 if (Type->isReferenceType()) Reason = 1; 5728 else if (IsCRegister) Reason = 2; 5729 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5730 Diag(ParamLoc, diag::note_parameter_type) << Type; 5731 } 5732 5733 TheCall->setType(Context.VoidTy); 5734 return false; 5735 } 5736 5737 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5738 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5739 // const char *named_addr); 5740 5741 Expr *Func = Call->getCallee(); 5742 5743 if (Call->getNumArgs() < 3) 5744 return Diag(Call->getEndLoc(), 5745 diag::err_typecheck_call_too_few_args_at_least) 5746 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5747 5748 // Type-check the first argument normally. 5749 if (checkBuiltinArgument(*this, Call, 0)) 5750 return true; 5751 5752 // Check that the current function is variadic. 5753 if (checkVAStartIsInVariadicFunction(*this, Func)) 5754 return true; 5755 5756 // __va_start on Windows does not validate the parameter qualifiers 5757 5758 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5759 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5760 5761 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5762 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5763 5764 const QualType &ConstCharPtrTy = 5765 Context.getPointerType(Context.CharTy.withConst()); 5766 if (!Arg1Ty->isPointerType() || 5767 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5768 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5769 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5770 << 0 /* qualifier difference */ 5771 << 3 /* parameter mismatch */ 5772 << 2 << Arg1->getType() << ConstCharPtrTy; 5773 5774 const QualType SizeTy = Context.getSizeType(); 5775 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5776 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5777 << Arg2->getType() << SizeTy << 1 /* different class */ 5778 << 0 /* qualifier difference */ 5779 << 3 /* parameter mismatch */ 5780 << 3 << Arg2->getType() << SizeTy; 5781 5782 return false; 5783 } 5784 5785 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5786 /// friends. This is declared to take (...), so we have to check everything. 
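///
/// For example (illustrative): __builtin_isgreater(1.0f, 2.0) is accepted
/// after the usual arithmetic conversions produce 'double', while
/// __builtin_isgreater(1, 2) is rejected because the common type is not a
/// real floating type.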
5787 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
5788 if (checkArgCount(*this, TheCall, 2))
5789 return true;
5790
5791 ExprResult OrigArg0 = TheCall->getArg(0);
5792 ExprResult OrigArg1 = TheCall->getArg(1);
5793
5794 // Do standard promotions between the two arguments, returning their common
5795 // type.
5796 QualType Res = UsualArithmeticConversions(
5797 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
5798 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
5799 return true;
5800
5801 // Make sure any conversions are pushed back into the call; this is
5802 // type safe since unordered compare builtins are declared as "_Bool
5803 // foo(...)".
5804 TheCall->setArg(0, OrigArg0.get());
5805 TheCall->setArg(1, OrigArg1.get());
5806
5807 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
5808 return false;
5809
5810 // If the common type isn't a real floating type, then the arguments were
5811 // invalid for this operation.
5812 if (Res.isNull() || !Res->isRealFloatingType())
5813 return Diag(OrigArg0.get()->getBeginLoc(),
5814 diag::err_typecheck_call_invalid_ordered_compare)
5815 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
5816 << SourceRange(OrigArg0.get()->getBeginLoc(),
5817 OrigArg1.get()->getEndLoc());
5818
5819 return false;
5820 }
5821
5822 /// SemaBuiltinFPClassification - Handle functions like
5823 /// __builtin_isnan and friends. This is declared to take (...), so we have
5824 /// to check everything. We expect the last argument to be a floating point
5825 /// value.
5826 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
5827 if (checkArgCount(*this, TheCall, NumArgs))
5828 return true;
5829
5830 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
5831 // on all preceding parameters just being int. Try all of those.
5832 for (unsigned i = 0; i < NumArgs - 1; ++i) {
5833 Expr *Arg = TheCall->getArg(i);
5834
5835 if (Arg->isTypeDependent())
5836 return false;
5837
5838 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
5839
5840 if (Res.isInvalid())
5841 return true;
5842 TheCall->setArg(i, Res.get());
5843 }
5844
5845 Expr *OrigArg = TheCall->getArg(NumArgs-1);
5846
5847 if (OrigArg->isTypeDependent())
5848 return false;
5849
5850 // Usual Unary Conversions will convert half to float, which we want for
5851 // machines that use fp16 conversion intrinsics. Otherwise, we want to leave
5852 // the type as it is, but do normal L->Rvalue conversions.
5853 if (Context.getTargetInfo().useFP16ConversionIntrinsics())
5854 OrigArg = UsualUnaryConversions(OrigArg).get();
5855 else
5856 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
5857 TheCall->setArg(NumArgs - 1, OrigArg);
5858
5859 // This operation requires a non-_Complex floating-point number.
5860 if (!OrigArg->getType()->isRealFloatingType())
5861 return Diag(OrigArg->getBeginLoc(),
5862 diag::err_typecheck_call_invalid_unary_fp)
5863 << OrigArg->getType() << OrigArg->getSourceRange();
5864
5865 return false;
5866 }
5867
5868 /// Perform semantic analysis for a call to __builtin_complex.
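// Illustrative examples (added, not from the original comment):
//   __builtin_complex(1.0, 2.0)    // OK, result type is _Complex double
//   __builtin_complex(1.0f, 2.0)   // error: operands must have the same
//                                  // real floating-point type
//   __builtin_complex(1, 2)        // error: operands must be real floating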
5869 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 5870 if (checkArgCount(*this, TheCall, 2)) 5871 return true; 5872 5873 bool Dependent = false; 5874 for (unsigned I = 0; I != 2; ++I) { 5875 Expr *Arg = TheCall->getArg(I); 5876 QualType T = Arg->getType(); 5877 if (T->isDependentType()) { 5878 Dependent = true; 5879 continue; 5880 } 5881 5882 // Despite supporting _Complex int, GCC requires a real floating point type 5883 // for the operands of __builtin_complex. 5884 if (!T->isRealFloatingType()) { 5885 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 5886 << Arg->getType() << Arg->getSourceRange(); 5887 } 5888 5889 ExprResult Converted = DefaultLvalueConversion(Arg); 5890 if (Converted.isInvalid()) 5891 return true; 5892 TheCall->setArg(I, Converted.get()); 5893 } 5894 5895 if (Dependent) { 5896 TheCall->setType(Context.DependentTy); 5897 return false; 5898 } 5899 5900 Expr *Real = TheCall->getArg(0); 5901 Expr *Imag = TheCall->getArg(1); 5902 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 5903 return Diag(Real->getBeginLoc(), 5904 diag::err_typecheck_call_different_arg_types) 5905 << Real->getType() << Imag->getType() 5906 << Real->getSourceRange() << Imag->getSourceRange(); 5907 } 5908 5909 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 5910 // don't allow this builtin to form those types either. 5911 // FIXME: Should we allow these types? 5912 if (Real->getType()->isFloat16Type()) 5913 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 5914 << "_Float16"; 5915 if (Real->getType()->isHalfType()) 5916 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 5917 << "half"; 5918 5919 TheCall->setType(Context.getComplexType(Real->getType())); 5920 return false; 5921 } 5922 5923 // Customized Sema Checking for VSX builtins that have the following signature: 5924 // vector [...] builtinName(vector [...], vector [...], const int); 5925 // Which takes the same type of vectors (any legal vector type) for the first 5926 // two arguments and takes compile time constant for the third argument. 5927 // Example builtins are : 5928 // vector double vec_xxpermdi(vector double, vector double, int); 5929 // vector short vec_xxsldwi(vector short, vector short, int); 5930 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5931 unsigned ExpectedNumArgs = 3; 5932 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 5933 return true; 5934 5935 // Check the third argument is a compile time constant 5936 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 5937 return Diag(TheCall->getBeginLoc(), 5938 diag::err_vsx_builtin_nonconstant_argument) 5939 << 3 /* argument index */ << TheCall->getDirectCallee() 5940 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5941 TheCall->getArg(2)->getEndLoc()); 5942 5943 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5944 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5945 5946 // Check the type of argument 1 and argument 2 are vectors. 5947 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5948 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5949 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5950 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5951 << TheCall->getDirectCallee() 5952 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5953 TheCall->getArg(1)->getEndLoc()); 5954 } 5955 5956 // Check the first two arguments are the same type. 
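// (Added illustration, hypothetical call: passing, say, a 'vector double' and
// a 'vector float' as the first two arguments is rejected here with
// err_vec_builtin_incompatible_vector.)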
5957 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5958 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5959 << TheCall->getDirectCallee() 5960 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5961 TheCall->getArg(1)->getEndLoc()); 5962 } 5963 5964 // When default clang type checking is turned off and the customized type 5965 // checking is used, the returning type of the function must be explicitly 5966 // set. Otherwise it is _Bool by default. 5967 TheCall->setType(Arg1Ty); 5968 5969 return false; 5970 } 5971 5972 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5973 // This is declared to take (...), so we have to check everything. 5974 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5975 if (TheCall->getNumArgs() < 2) 5976 return ExprError(Diag(TheCall->getEndLoc(), 5977 diag::err_typecheck_call_too_few_args_at_least) 5978 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5979 << TheCall->getSourceRange()); 5980 5981 // Determine which of the following types of shufflevector we're checking: 5982 // 1) unary, vector mask: (lhs, mask) 5983 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5984 QualType resType = TheCall->getArg(0)->getType(); 5985 unsigned numElements = 0; 5986 5987 if (!TheCall->getArg(0)->isTypeDependent() && 5988 !TheCall->getArg(1)->isTypeDependent()) { 5989 QualType LHSType = TheCall->getArg(0)->getType(); 5990 QualType RHSType = TheCall->getArg(1)->getType(); 5991 5992 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 5993 return ExprError( 5994 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 5995 << TheCall->getDirectCallee() 5996 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5997 TheCall->getArg(1)->getEndLoc())); 5998 5999 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6000 unsigned numResElements = TheCall->getNumArgs() - 2; 6001 6002 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6003 // with mask. If so, verify that RHS is an integer vector type with the 6004 // same number of elts as lhs. 6005 if (TheCall->getNumArgs() == 2) { 6006 if (!RHSType->hasIntegerRepresentation() || 6007 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6008 return ExprError(Diag(TheCall->getBeginLoc(), 6009 diag::err_vec_builtin_incompatible_vector) 6010 << TheCall->getDirectCallee() 6011 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6012 TheCall->getArg(1)->getEndLoc())); 6013 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6014 return ExprError(Diag(TheCall->getBeginLoc(), 6015 diag::err_vec_builtin_incompatible_vector) 6016 << TheCall->getDirectCallee() 6017 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6018 TheCall->getArg(1)->getEndLoc())); 6019 } else if (numElements != numResElements) { 6020 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6021 resType = Context.getVectorType(eltType, numResElements, 6022 VectorType::GenericVector); 6023 } 6024 } 6025 6026 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6027 if (TheCall->getArg(i)->isTypeDependent() || 6028 TheCall->getArg(i)->isValueDependent()) 6029 continue; 6030 6031 Optional<llvm::APSInt> Result; 6032 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6033 return ExprError(Diag(TheCall->getBeginLoc(), 6034 diag::err_shufflevector_nonconstant_argument) 6035 << TheCall->getArg(i)->getSourceRange()); 6036 6037 // Allow -1 which will be translated to undef in the IR. 
6038 if (Result->isSigned() && Result->isAllOnesValue()) 6039 continue; 6040 6041 if (Result->getActiveBits() > 64 || 6042 Result->getZExtValue() >= numElements * 2) 6043 return ExprError(Diag(TheCall->getBeginLoc(), 6044 diag::err_shufflevector_argument_too_large) 6045 << TheCall->getArg(i)->getSourceRange()); 6046 } 6047 6048 SmallVector<Expr*, 32> exprs; 6049 6050 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 6051 exprs.push_back(TheCall->getArg(i)); 6052 TheCall->setArg(i, nullptr); 6053 } 6054 6055 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 6056 TheCall->getCallee()->getBeginLoc(), 6057 TheCall->getRParenLoc()); 6058 } 6059 6060 /// SemaConvertVectorExpr - Handle __builtin_convertvector 6061 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 6062 SourceLocation BuiltinLoc, 6063 SourceLocation RParenLoc) { 6064 ExprValueKind VK = VK_RValue; 6065 ExprObjectKind OK = OK_Ordinary; 6066 QualType DstTy = TInfo->getType(); 6067 QualType SrcTy = E->getType(); 6068 6069 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 6070 return ExprError(Diag(BuiltinLoc, 6071 diag::err_convertvector_non_vector) 6072 << E->getSourceRange()); 6073 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 6074 return ExprError(Diag(BuiltinLoc, 6075 diag::err_convertvector_non_vector_type)); 6076 6077 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 6078 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 6079 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 6080 if (SrcElts != DstElts) 6081 return ExprError(Diag(BuiltinLoc, 6082 diag::err_convertvector_incompatible_vector) 6083 << E->getSourceRange()); 6084 } 6085 6086 return new (Context) 6087 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 6088 } 6089 6090 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 6091 // This is declared to take (const void*, ...) and can take two 6092 // optional constant int args. 6093 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 6094 unsigned NumArgs = TheCall->getNumArgs(); 6095 6096 if (NumArgs > 3) 6097 return Diag(TheCall->getEndLoc(), 6098 diag::err_typecheck_call_too_many_args_at_most) 6099 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6100 6101 // Argument 0 is checked for us and the remaining arguments must be 6102 // constant integers. 6103 for (unsigned i = 1; i != NumArgs; ++i) 6104 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6105 return true; 6106 6107 return false; 6108 } 6109 6110 /// SemaBuiltinAssume - Handle __assume (MS Extension). 6111 // __assume does not evaluate its arguments, and should warn if its argument 6112 // has side effects. 6113 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6114 Expr *Arg = TheCall->getArg(0); 6115 if (Arg->isInstantiationDependent()) return false; 6116 6117 if (Arg->HasSideEffects(Context)) 6118 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6119 << Arg->getSourceRange() 6120 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6121 6122 return false; 6123 } 6124 6125 /// Handle __builtin_alloca_with_align. This is declared 6126 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6127 /// than 8. 6128 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6129 // The alignment must be a constant integer. 6130 Expr *Arg = TheCall->getArg(1); 6131 6132 // We can't check the value of a dependent argument. 
6133 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6134 if (const auto *UE = 6135 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6136 if (UE->getKind() == UETT_AlignOf || 6137 UE->getKind() == UETT_PreferredAlignOf) 6138 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6139 << Arg->getSourceRange(); 6140 6141 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6142 6143 if (!Result.isPowerOf2()) 6144 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6145 << Arg->getSourceRange(); 6146 6147 if (Result < Context.getCharWidth()) 6148 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6149 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6150 6151 if (Result > std::numeric_limits<int32_t>::max()) 6152 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6153 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6154 } 6155 6156 return false; 6157 } 6158 6159 /// Handle __builtin_assume_aligned. This is declared 6160 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6161 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6162 unsigned NumArgs = TheCall->getNumArgs(); 6163 6164 if (NumArgs > 3) 6165 return Diag(TheCall->getEndLoc(), 6166 diag::err_typecheck_call_too_many_args_at_most) 6167 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6168 6169 // The alignment must be a constant integer. 6170 Expr *Arg = TheCall->getArg(1); 6171 6172 // We can't check the value of a dependent argument. 6173 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6174 llvm::APSInt Result; 6175 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6176 return true; 6177 6178 if (!Result.isPowerOf2()) 6179 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6180 << Arg->getSourceRange(); 6181 6182 if (Result > Sema::MaximumAlignment) 6183 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6184 << Arg->getSourceRange() << Sema::MaximumAlignment; 6185 } 6186 6187 if (NumArgs > 2) { 6188 ExprResult Arg(TheCall->getArg(2)); 6189 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6190 Context.getSizeType(), false); 6191 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6192 if (Arg.isInvalid()) return true; 6193 TheCall->setArg(2, Arg.get()); 6194 } 6195 6196 return false; 6197 } 6198 6199 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6200 unsigned BuiltinID = 6201 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6202 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6203 6204 unsigned NumArgs = TheCall->getNumArgs(); 6205 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6206 if (NumArgs < NumRequiredArgs) { 6207 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6208 << 0 /* function call */ << NumRequiredArgs << NumArgs 6209 << TheCall->getSourceRange(); 6210 } 6211 if (NumArgs >= NumRequiredArgs + 0x100) { 6212 return Diag(TheCall->getEndLoc(), 6213 diag::err_typecheck_call_too_many_args_at_most) 6214 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6215 << TheCall->getSourceRange(); 6216 } 6217 unsigned i = 0; 6218 6219 // For formatting call, check buffer arg. 
6220 if (!IsSizeCall) { 6221 ExprResult Arg(TheCall->getArg(i)); 6222 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6223 Context, Context.VoidPtrTy, false); 6224 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6225 if (Arg.isInvalid()) 6226 return true; 6227 TheCall->setArg(i, Arg.get()); 6228 i++; 6229 } 6230 6231 // Check string literal arg. 6232 unsigned FormatIdx = i; 6233 { 6234 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6235 if (Arg.isInvalid()) 6236 return true; 6237 TheCall->setArg(i, Arg.get()); 6238 i++; 6239 } 6240 6241 // Make sure variadic args are scalar. 6242 unsigned FirstDataArg = i; 6243 while (i < NumArgs) { 6244 ExprResult Arg = DefaultVariadicArgumentPromotion( 6245 TheCall->getArg(i), VariadicFunction, nullptr); 6246 if (Arg.isInvalid()) 6247 return true; 6248 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6249 if (ArgSize.getQuantity() >= 0x100) { 6250 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6251 << i << (int)ArgSize.getQuantity() << 0xff 6252 << TheCall->getSourceRange(); 6253 } 6254 TheCall->setArg(i, Arg.get()); 6255 i++; 6256 } 6257 6258 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6259 // call to avoid duplicate diagnostics. 6260 if (!IsSizeCall) { 6261 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6262 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6263 bool Success = CheckFormatArguments( 6264 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6265 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6266 CheckedVarArgs); 6267 if (!Success) 6268 return true; 6269 } 6270 6271 if (IsSizeCall) { 6272 TheCall->setType(Context.getSizeType()); 6273 } else { 6274 TheCall->setType(Context.VoidPtrTy); 6275 } 6276 return false; 6277 } 6278 6279 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6280 /// TheCall is a constant expression. 6281 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6282 llvm::APSInt &Result) { 6283 Expr *Arg = TheCall->getArg(ArgNum); 6284 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6285 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6286 6287 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6288 6289 Optional<llvm::APSInt> R; 6290 if (!(R = Arg->getIntegerConstantExpr(Context))) 6291 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6292 << FDecl->getDeclName() << Arg->getSourceRange(); 6293 Result = *R; 6294 return false; 6295 } 6296 6297 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6298 /// TheCall is a constant expression in the range [Low, High]. 6299 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6300 int Low, int High, bool RangeIsError) { 6301 if (isConstantEvaluated()) 6302 return false; 6303 llvm::APSInt Result; 6304 6305 // We can't check the value of a dependent argument. 6306 Expr *Arg = TheCall->getArg(ArgNum); 6307 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6308 return false; 6309 6310 // Check constant-ness first. 
6311 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6312 return true;
6313
6314 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
6315 if (RangeIsError)
6316 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
6317 << Result.toString(10) << Low << High << Arg->getSourceRange();
6318 else
6319 // Defer the warning until we know if the code will be emitted so that
6320 // dead code can ignore this.
6321 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6322 PDiag(diag::warn_argument_invalid_range)
6323 << Result.toString(10) << Low << High
6324 << Arg->getSourceRange());
6325 }
6326
6327 return false;
6328 }
6329
6330 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6331 /// TheCall is a constant expression that is a multiple of Num.
6332 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6333 unsigned Num) {
6334 llvm::APSInt Result;
6335
6336 // We can't check the value of a dependent argument.
6337 Expr *Arg = TheCall->getArg(ArgNum);
6338 if (Arg->isTypeDependent() || Arg->isValueDependent())
6339 return false;
6340
6341 // Check constant-ness first.
6342 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6343 return true;
6344
6345 if (Result.getSExtValue() % Num != 0)
6346 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6347 << Num << Arg->getSourceRange();
6348
6349 return false;
6350 }
6351
6352 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
6353 /// constant expression representing a power of 2.
6354 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
6355 llvm::APSInt Result;
6356
6357 // We can't check the value of a dependent argument.
6358 Expr *Arg = TheCall->getArg(ArgNum);
6359 if (Arg->isTypeDependent() || Arg->isValueDependent())
6360 return false;
6361
6362 // Check constant-ness first.
6363 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6364 return true;
6365
6366 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
6367 // and only if x is a power of 2.
6368 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
6369 return false;
6370
6371 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
6372 << Arg->getSourceRange();
6373 }
6374
6375 static bool IsShiftedByte(llvm::APSInt Value) {
6376 if (Value.isNegative())
6377 return false;
6378
6379 // Check if it's a shifted byte, by shifting it down
6380 while (true) {
6381 // If the value fits in the bottom byte, the check passes.
6382 if (Value < 0x100)
6383 return true;
6384
6385 // Otherwise, if the value has _any_ bits in the bottom byte, the check
6386 // fails.
6387 if ((Value & 0xFF) != 0)
6388 return false;
6389
6390 // If the bottom 8 bits are all 0, but something above that is nonzero,
6391 // then shifting the value right by 8 bits won't affect whether it's a
6392 // shifted byte or not. So do that, and go round again.
6393 Value >>= 8;
6394 }
6395 }
6396
6397 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
6398 /// a constant expression representing an arbitrary byte value shifted left by
6399 /// a multiple of 8 bits.
6400 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
6401 unsigned ArgBits) {
6402 llvm::APSInt Result;
6403
6404 // We can't check the value of a dependent argument.
6405 Expr *Arg = TheCall->getArg(ArgNum); 6406 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6407 return false; 6408 6409 // Check constant-ness first. 6410 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6411 return true; 6412 6413 // Truncate to the given size. 6414 Result = Result.getLoBits(ArgBits); 6415 Result.setIsUnsigned(true); 6416 6417 if (IsShiftedByte(Result)) 6418 return false; 6419 6420 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 6421 << Arg->getSourceRange(); 6422 } 6423 6424 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 6425 /// TheCall is a constant expression representing either a shifted byte value, 6426 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 6427 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 6428 /// Arm MVE intrinsics. 6429 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 6430 int ArgNum, 6431 unsigned ArgBits) { 6432 llvm::APSInt Result; 6433 6434 // We can't check the value of a dependent argument. 6435 Expr *Arg = TheCall->getArg(ArgNum); 6436 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6437 return false; 6438 6439 // Check constant-ness first. 6440 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6441 return true; 6442 6443 // Truncate to the given size. 6444 Result = Result.getLoBits(ArgBits); 6445 Result.setIsUnsigned(true); 6446 6447 // Check to see if it's in either of the required forms. 6448 if (IsShiftedByte(Result) || 6449 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 6450 return false; 6451 6452 return Diag(TheCall->getBeginLoc(), 6453 diag::err_argument_not_shifted_byte_or_xxff) 6454 << Arg->getSourceRange(); 6455 } 6456 6457 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 6458 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 6459 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 6460 if (checkArgCount(*this, TheCall, 2)) 6461 return true; 6462 Expr *Arg0 = TheCall->getArg(0); 6463 Expr *Arg1 = TheCall->getArg(1); 6464 6465 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 6466 if (FirstArg.isInvalid()) 6467 return true; 6468 QualType FirstArgType = FirstArg.get()->getType(); 6469 if (!FirstArgType->isAnyPointerType()) 6470 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 6471 << "first" << FirstArgType << Arg0->getSourceRange(); 6472 TheCall->setArg(0, FirstArg.get()); 6473 6474 ExprResult SecArg = DefaultLvalueConversion(Arg1); 6475 if (SecArg.isInvalid()) 6476 return true; 6477 QualType SecArgType = SecArg.get()->getType(); 6478 if (!SecArgType->isIntegerType()) 6479 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 6480 << "second" << SecArgType << Arg1->getSourceRange(); 6481 6482 // Derive the return type from the pointer argument. 
6483 TheCall->setType(FirstArgType);
6484 return false;
6485 }
6486
6487 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
6488 if (checkArgCount(*this, TheCall, 2))
6489 return true;
6490
6491 Expr *Arg0 = TheCall->getArg(0);
6492 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6493 if (FirstArg.isInvalid())
6494 return true;
6495 QualType FirstArgType = FirstArg.get()->getType();
6496 if (!FirstArgType->isAnyPointerType())
6497 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6498 << "first" << FirstArgType << Arg0->getSourceRange();
6499 TheCall->setArg(0, FirstArg.get());
6500
6501 // Derive the return type from the pointer argument.
6502 TheCall->setType(FirstArgType);
6503
6504 // Second arg must be a constant in range [0,15]
6505 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
6506 }
6507
6508 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
6509 if (checkArgCount(*this, TheCall, 2))
6510 return true;
6511 Expr *Arg0 = TheCall->getArg(0);
6512 Expr *Arg1 = TheCall->getArg(1);
6513
6514 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6515 if (FirstArg.isInvalid())
6516 return true;
6517 QualType FirstArgType = FirstArg.get()->getType();
6518 if (!FirstArgType->isAnyPointerType())
6519 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6520 << "first" << FirstArgType << Arg0->getSourceRange();
6521
6522 QualType SecArgType = Arg1->getType();
6523 if (!SecArgType->isIntegerType())
6524 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6525 << "second" << SecArgType << Arg1->getSourceRange();
6526 TheCall->setType(Context.IntTy);
6527 return false;
6528 }
6529
6530 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
6531 BuiltinID == AArch64::BI__builtin_arm_stg) {
6532 if (checkArgCount(*this, TheCall, 1))
6533 return true;
6534 Expr *Arg0 = TheCall->getArg(0);
6535 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6536 if (FirstArg.isInvalid())
6537 return true;
6538
6539 QualType FirstArgType = FirstArg.get()->getType();
6540 if (!FirstArgType->isAnyPointerType())
6541 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6542 << "first" << FirstArgType << Arg0->getSourceRange();
6543 TheCall->setArg(0, FirstArg.get());
6544
6545 // Derive the return type from the pointer argument.
6546 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6547 TheCall->setType(FirstArgType); 6548 return false; 6549 } 6550 6551 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6552 Expr *ArgA = TheCall->getArg(0); 6553 Expr *ArgB = TheCall->getArg(1); 6554 6555 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6556 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6557 6558 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6559 return true; 6560 6561 QualType ArgTypeA = ArgExprA.get()->getType(); 6562 QualType ArgTypeB = ArgExprB.get()->getType(); 6563 6564 auto isNull = [&] (Expr *E) -> bool { 6565 return E->isNullPointerConstant( 6566 Context, Expr::NPC_ValueDependentIsNotNull); }; 6567 6568 // argument should be either a pointer or null 6569 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6570 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6571 << "first" << ArgTypeA << ArgA->getSourceRange(); 6572 6573 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6574 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6575 << "second" << ArgTypeB << ArgB->getSourceRange(); 6576 6577 // Ensure Pointee types are compatible 6578 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6579 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6580 QualType pointeeA = ArgTypeA->getPointeeType(); 6581 QualType pointeeB = ArgTypeB->getPointeeType(); 6582 if (!Context.typesAreCompatible( 6583 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6584 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6585 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6586 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6587 << ArgB->getSourceRange(); 6588 } 6589 } 6590 6591 // at least one argument should be pointer type 6592 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6593 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6594 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6595 6596 if (isNull(ArgA)) // adopt type of the other pointer 6597 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6598 6599 if (isNull(ArgB)) 6600 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6601 6602 TheCall->setArg(0, ArgExprA.get()); 6603 TheCall->setArg(1, ArgExprB.get()); 6604 TheCall->setType(Context.LongLongTy); 6605 return false; 6606 } 6607 assert(false && "Unhandled ARM MTE intrinsic"); 6608 return true; 6609 } 6610 6611 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6612 /// TheCall is an ARM/AArch64 special register string literal. 
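// Illustrative register strings (added; the forms mirror the checks below and
// the concrete registers are only examples):
//   ARM coprocessor form:   "cp15:0:c13:c0:3"   (coproc, opc1, CRn, CRm, opc2)
//   AArch64 op form:        "1:3:13:0:2"        (o0, op1, CRn, CRm, op2)
//   Plain register name:    only accepted when AllowName is true.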
6613 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
6614 int ArgNum, unsigned ExpectedFieldNum,
6615 bool AllowName) {
6616 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6617 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6618 BuiltinID == ARM::BI__builtin_arm_rsr ||
6619 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6620 BuiltinID == ARM::BI__builtin_arm_wsr ||
6621 BuiltinID == ARM::BI__builtin_arm_wsrp;
6622 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
6623 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
6624 BuiltinID == AArch64::BI__builtin_arm_rsr ||
6625 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
6626 BuiltinID == AArch64::BI__builtin_arm_wsr ||
6627 BuiltinID == AArch64::BI__builtin_arm_wsrp;
6628 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
6629
6630 // We can't check the value of a dependent argument.
6631 Expr *Arg = TheCall->getArg(ArgNum);
6632 if (Arg->isTypeDependent() || Arg->isValueDependent())
6633 return false;
6634
6635 // Check if the argument is a string literal.
6636 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
6637 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
6638 << Arg->getSourceRange();
6639
6640 // Check the type of special register given.
6641 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
6642 SmallVector<StringRef, 6> Fields;
6643 Reg.split(Fields, ":");
6644
6645 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
6646 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
6647 << Arg->getSourceRange();
6648
6649 // If the string is the name of a register then we cannot check that it is
6650 // valid here but if the string is one of the forms described in ACLE then we
6651 // can check that the supplied fields are integers and within the valid
6652 // ranges.
6653 if (Fields.size() > 1) {
6654 bool FiveFields = Fields.size() == 5;
6655
6656 bool ValidString = true;
6657 if (IsARMBuiltin) {
6658 ValidString &= Fields[0].startswith_lower("cp") ||
6659 Fields[0].startswith_lower("p");
6660 if (ValidString)
6661 Fields[0] =
6662 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
6663
6664 ValidString &= Fields[2].startswith_lower("c");
6665 if (ValidString)
6666 Fields[2] = Fields[2].drop_front(1);
6667
6668 if (FiveFields) {
6669 ValidString &= Fields[3].startswith_lower("c");
6670 if (ValidString)
6671 Fields[3] = Fields[3].drop_front(1);
6672 }
6673 }
6674
6675 SmallVector<int, 5> Ranges;
6676 if (FiveFields)
6677 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
6678 else
6679 Ranges.append({15, 7, 15});
6680
6681 for (unsigned i=0; i<Fields.size(); ++i) {
6682 int IntField;
6683 ValidString &= !Fields[i].getAsInteger(10, IntField);
6684 ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
6685 }
6686
6687 if (!ValidString)
6688 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
6689 << Arg->getSourceRange();
6690 } else if (IsAArch64Builtin && Fields.size() == 1) {
6691 // If the register name is one of those that appear in the condition below
6692 // and the special register builtin being used is one of the write builtins,
6693 // then we require that the argument provided for writing to the register
6694 // is an integer constant expression. This is because it will be lowered to
6695 // an MSR (immediate) instruction, so we need to know the immediate at
6696 // compile time.
6697 if (TheCall->getNumArgs() != 2) 6698 return false; 6699 6700 std::string RegLower = Reg.lower(); 6701 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6702 RegLower != "pan" && RegLower != "uao") 6703 return false; 6704 6705 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6706 } 6707 6708 return false; 6709 } 6710 6711 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6712 /// This checks that the target supports __builtin_longjmp and 6713 /// that val is a constant 1. 6714 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6715 if (!Context.getTargetInfo().hasSjLjLowering()) 6716 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6717 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6718 6719 Expr *Arg = TheCall->getArg(1); 6720 llvm::APSInt Result; 6721 6722 // TODO: This is less than ideal. Overload this to take a value. 6723 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6724 return true; 6725 6726 if (Result != 1) 6727 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6728 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6729 6730 return false; 6731 } 6732 6733 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6734 /// This checks that the target supports __builtin_setjmp. 6735 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6736 if (!Context.getTargetInfo().hasSjLjLowering()) 6737 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6738 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6739 return false; 6740 } 6741 6742 namespace { 6743 6744 class UncoveredArgHandler { 6745 enum { Unknown = -1, AllCovered = -2 }; 6746 6747 signed FirstUncoveredArg = Unknown; 6748 SmallVector<const Expr *, 4> DiagnosticExprs; 6749 6750 public: 6751 UncoveredArgHandler() = default; 6752 6753 bool hasUncoveredArg() const { 6754 return (FirstUncoveredArg >= 0); 6755 } 6756 6757 unsigned getUncoveredArg() const { 6758 assert(hasUncoveredArg() && "no uncovered argument"); 6759 return FirstUncoveredArg; 6760 } 6761 6762 void setAllCovered() { 6763 // A string has been found with all arguments covered, so clear out 6764 // the diagnostics. 6765 DiagnosticExprs.clear(); 6766 FirstUncoveredArg = AllCovered; 6767 } 6768 6769 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6770 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6771 6772 // Don't update if a previous string covers all arguments. 6773 if (FirstUncoveredArg == AllCovered) 6774 return; 6775 6776 // UncoveredArgHandler tracks the highest uncovered argument index 6777 // and with it all the strings that match this index. 6778 if (NewFirstUncoveredArg == FirstUncoveredArg) 6779 DiagnosticExprs.push_back(StrExpr); 6780 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6781 DiagnosticExprs.clear(); 6782 DiagnosticExprs.push_back(StrExpr); 6783 FirstUncoveredArg = NewFirstUncoveredArg; 6784 } 6785 } 6786 6787 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6788 }; 6789 6790 enum StringLiteralCheckType { 6791 SLCT_NotALiteral, 6792 SLCT_UncheckedLiteral, 6793 SLCT_CheckedLiteral 6794 }; 6795 6796 } // namespace 6797 6798 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6799 BinaryOperatorKind BinOpKind, 6800 bool AddendIsRight) { 6801 unsigned BitWidth = Offset.getBitWidth(); 6802 unsigned AddendBitWidth = Addend.getBitWidth(); 6803 // There might be negative interim results. 
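// (Added example, illustrative: when checking a format argument written as
// Str + 2 - 1, the trailing '- 1' is folded first, so the running offset is
// briefly -1 before the '+ 2' is summed in; hence the signed handling here.)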
6804 if (Addend.isUnsigned()) { 6805 Addend = Addend.zext(++AddendBitWidth); 6806 Addend.setIsSigned(true); 6807 } 6808 // Adjust the bit width of the APSInts. 6809 if (AddendBitWidth > BitWidth) { 6810 Offset = Offset.sext(AddendBitWidth); 6811 BitWidth = AddendBitWidth; 6812 } else if (BitWidth > AddendBitWidth) { 6813 Addend = Addend.sext(BitWidth); 6814 } 6815 6816 bool Ov = false; 6817 llvm::APSInt ResOffset = Offset; 6818 if (BinOpKind == BO_Add) 6819 ResOffset = Offset.sadd_ov(Addend, Ov); 6820 else { 6821 assert(AddendIsRight && BinOpKind == BO_Sub && 6822 "operator must be add or sub with addend on the right"); 6823 ResOffset = Offset.ssub_ov(Addend, Ov); 6824 } 6825 6826 // We add an offset to a pointer here so we should support an offset as big as 6827 // possible. 6828 if (Ov) { 6829 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6830 "index (intermediate) result too big"); 6831 Offset = Offset.sext(2 * BitWidth); 6832 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6833 return; 6834 } 6835 6836 Offset = ResOffset; 6837 } 6838 6839 namespace { 6840 6841 // This is a wrapper class around StringLiteral to support offsetted string 6842 // literals as format strings. It takes the offset into account when returning 6843 // the string and its length or the source locations to display notes correctly. 6844 class FormatStringLiteral { 6845 const StringLiteral *FExpr; 6846 int64_t Offset; 6847 6848 public: 6849 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6850 : FExpr(fexpr), Offset(Offset) {} 6851 6852 StringRef getString() const { 6853 return FExpr->getString().drop_front(Offset); 6854 } 6855 6856 unsigned getByteLength() const { 6857 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6858 } 6859 6860 unsigned getLength() const { return FExpr->getLength() - Offset; } 6861 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6862 6863 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6864 6865 QualType getType() const { return FExpr->getType(); } 6866 6867 bool isAscii() const { return FExpr->isAscii(); } 6868 bool isWide() const { return FExpr->isWide(); } 6869 bool isUTF8() const { return FExpr->isUTF8(); } 6870 bool isUTF16() const { return FExpr->isUTF16(); } 6871 bool isUTF32() const { return FExpr->isUTF32(); } 6872 bool isPascal() const { return FExpr->isPascal(); } 6873 6874 SourceLocation getLocationOfByte( 6875 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6876 const TargetInfo &Target, unsigned *StartToken = nullptr, 6877 unsigned *StartTokenByteOffset = nullptr) const { 6878 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6879 StartToken, StartTokenByteOffset); 6880 } 6881 6882 SourceLocation getBeginLoc() const LLVM_READONLY { 6883 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6884 } 6885 6886 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6887 }; 6888 6889 } // namespace 6890 6891 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6892 const Expr *OrigFormatExpr, 6893 ArrayRef<const Expr *> Args, 6894 bool HasVAListArg, unsigned format_idx, 6895 unsigned firstDataArg, 6896 Sema::FormatStringType Type, 6897 bool inFunctionCall, 6898 Sema::VariadicCallType CallType, 6899 llvm::SmallBitVector &CheckedVarArgs, 6900 UncoveredArgHandler &UncoveredArg, 6901 bool IgnoreStringsWithoutSpecifiers); 6902 6903 // Determine if an expression is a string literal or constant string. 
6904 // If this function returns false on the arguments to a function expecting a 6905 // format string, we will usually need to emit a warning. 6906 // True string literals are then checked by CheckFormatString. 6907 static StringLiteralCheckType 6908 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6909 bool HasVAListArg, unsigned format_idx, 6910 unsigned firstDataArg, Sema::FormatStringType Type, 6911 Sema::VariadicCallType CallType, bool InFunctionCall, 6912 llvm::SmallBitVector &CheckedVarArgs, 6913 UncoveredArgHandler &UncoveredArg, 6914 llvm::APSInt Offset, 6915 bool IgnoreStringsWithoutSpecifiers = false) { 6916 if (S.isConstantEvaluated()) 6917 return SLCT_NotALiteral; 6918 tryAgain: 6919 assert(Offset.isSigned() && "invalid offset"); 6920 6921 if (E->isTypeDependent() || E->isValueDependent()) 6922 return SLCT_NotALiteral; 6923 6924 E = E->IgnoreParenCasts(); 6925 6926 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6927 // Technically -Wformat-nonliteral does not warn about this case. 6928 // The behavior of printf and friends in this case is implementation 6929 // dependent. Ideally if the format string cannot be null then 6930 // it should have a 'nonnull' attribute in the function prototype. 6931 return SLCT_UncheckedLiteral; 6932 6933 switch (E->getStmtClass()) { 6934 case Stmt::BinaryConditionalOperatorClass: 6935 case Stmt::ConditionalOperatorClass: { 6936 // The expression is a literal if both sub-expressions were, and it was 6937 // completely checked only if both sub-expressions were checked. 6938 const AbstractConditionalOperator *C = 6939 cast<AbstractConditionalOperator>(E); 6940 6941 // Determine whether it is necessary to check both sub-expressions, for 6942 // example, because the condition expression is a constant that can be 6943 // evaluated at compile time. 6944 bool CheckLeft = true, CheckRight = true; 6945 6946 bool Cond; 6947 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6948 S.isConstantEvaluated())) { 6949 if (Cond) 6950 CheckRight = false; 6951 else 6952 CheckLeft = false; 6953 } 6954 6955 // We need to maintain the offsets for the right and the left hand side 6956 // separately to check if every possible indexed expression is a valid 6957 // string literal. They might have different offsets for different string 6958 // literals in the end. 6959 StringLiteralCheckType Left; 6960 if (!CheckLeft) 6961 Left = SLCT_UncheckedLiteral; 6962 else { 6963 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6964 HasVAListArg, format_idx, firstDataArg, 6965 Type, CallType, InFunctionCall, 6966 CheckedVarArgs, UncoveredArg, Offset, 6967 IgnoreStringsWithoutSpecifiers); 6968 if (Left == SLCT_NotALiteral || !CheckRight) { 6969 return Left; 6970 } 6971 } 6972 6973 StringLiteralCheckType Right = checkFormatStringExpr( 6974 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 6975 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6976 IgnoreStringsWithoutSpecifiers); 6977 6978 return (CheckLeft && Left < Right) ? 
Left : Right; 6979 } 6980 6981 case Stmt::ImplicitCastExprClass: 6982 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6983 goto tryAgain; 6984 6985 case Stmt::OpaqueValueExprClass: 6986 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 6987 E = src; 6988 goto tryAgain; 6989 } 6990 return SLCT_NotALiteral; 6991 6992 case Stmt::PredefinedExprClass: 6993 // While __func__, etc., are technically not string literals, they 6994 // cannot contain format specifiers and thus are not a security 6995 // liability. 6996 return SLCT_UncheckedLiteral; 6997 6998 case Stmt::DeclRefExprClass: { 6999 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7000 7001 // As an exception, do not flag errors for variables binding to 7002 // const string literals. 7003 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7004 bool isConstant = false; 7005 QualType T = DR->getType(); 7006 7007 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7008 isConstant = AT->getElementType().isConstant(S.Context); 7009 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7010 isConstant = T.isConstant(S.Context) && 7011 PT->getPointeeType().isConstant(S.Context); 7012 } else if (T->isObjCObjectPointerType()) { 7013 // In ObjC, there is usually no "const ObjectPointer" type, 7014 // so don't check if the pointee type is constant. 7015 isConstant = T.isConstant(S.Context); 7016 } 7017 7018 if (isConstant) { 7019 if (const Expr *Init = VD->getAnyInitializer()) { 7020 // Look through initializers like const char c[] = { "foo" } 7021 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7022 if (InitList->isStringLiteralInit()) 7023 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7024 } 7025 return checkFormatStringExpr(S, Init, Args, 7026 HasVAListArg, format_idx, 7027 firstDataArg, Type, CallType, 7028 /*InFunctionCall*/ false, CheckedVarArgs, 7029 UncoveredArg, Offset); 7030 } 7031 } 7032 7033 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7034 // special check to see if the format string is a function parameter 7035 // of the function calling the printf function. If the function 7036 // has an attribute indicating it is a printf-like function, then we 7037 // should suppress warnings concerning non-literals being used in a call 7038 // to a vprintf function. For example: 7039 // 7040 // void 7041 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7042 // va_list ap; 7043 // va_start(ap, fmt); 7044 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7045 // ... 7046 // } 7047 if (HasVAListArg) { 7048 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7049 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 7050 int PVIndex = PV->getFunctionScopeIndex() + 1; 7051 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 7052 // adjust for implicit parameter 7053 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 7054 if (MD->isInstance()) 7055 ++PVIndex; 7056 // We also check if the formats are compatible. 7057 // We can't pass a 'scanf' string to a 'printf' function. 
7058 if (PVIndex == PVFormat->getFormatIdx() &&
7059 Type == S.GetFormatStringType(PVFormat))
7060 return SLCT_UncheckedLiteral;
7061 }
7062 }
7063 }
7064 }
7065 }
7066
7067 return SLCT_NotALiteral;
7068 }
7069
7070 case Stmt::CallExprClass:
7071 case Stmt::CXXMemberCallExprClass: {
7072 const CallExpr *CE = cast<CallExpr>(E);
7073 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
7074 bool IsFirst = true;
7075 StringLiteralCheckType CommonResult;
7076 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
7077 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
7078 StringLiteralCheckType Result = checkFormatStringExpr(
7079 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7080 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7081 IgnoreStringsWithoutSpecifiers);
7082 if (IsFirst) {
7083 CommonResult = Result;
7084 IsFirst = false;
7085 }
7086 }
7087 if (!IsFirst)
7088 return CommonResult;
7089
7090 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
7091 unsigned BuiltinID = FD->getBuiltinID();
7092 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
7093 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
7094 const Expr *Arg = CE->getArg(0);
7095 return checkFormatStringExpr(S, Arg, Args,
7096 HasVAListArg, format_idx,
7097 firstDataArg, Type, CallType,
7098 InFunctionCall, CheckedVarArgs,
7099 UncoveredArg, Offset,
7100 IgnoreStringsWithoutSpecifiers);
7101 }
7102 }
7103 }
7104
7105 return SLCT_NotALiteral;
7106 }
7107 case Stmt::ObjCMessageExprClass: {
7108 const auto *ME = cast<ObjCMessageExpr>(E);
7109 if (const auto *MD = ME->getMethodDecl()) {
7110 if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
7111 // As a special case heuristic, if we're using the method -[NSBundle
7112 // localizedStringForKey:value:table:], ignore any key strings that lack
7113 // format specifiers. The idea is that if the key doesn't have any
7114 // format specifiers then it's probably just a key to map to the
7115 // localized strings. If it does have format specifiers though, then it's
7116 // likely that the text of the key is the format string in the
7117 // programmer's language, and should be checked.
7118 const ObjCInterfaceDecl *IFace;
7119 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
7120 IFace->getIdentifier()->isStr("NSBundle") &&
7121 MD->getSelector().isKeywordSelector(
7122 {"localizedStringForKey", "value", "table"})) {
7123 IgnoreStringsWithoutSpecifiers = true;
7124 }
7125
7126 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
7127 return checkFormatStringExpr(
7128 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7129 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7130 IgnoreStringsWithoutSpecifiers);
7131 }
7132 }
7133
7134 return SLCT_NotALiteral;
7135 }
7136 case Stmt::ObjCStringLiteralClass:
7137 case Stmt::StringLiteralClass: {
7138 const StringLiteral *StrE = nullptr;
7139
7140 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
7141 StrE = ObjCFExpr->getString();
7142 else
7143 StrE = cast<StringLiteral>(E);
7144
7145 if (StrE) {
7146 if (Offset.isNegative() || Offset > StrE->getLength()) {
7147 // TODO: It would be better to have an explicit warning for out of
7148 // bounds literals.
7149 return SLCT_NotALiteral; 7150 } 7151 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 7152 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 7153 firstDataArg, Type, InFunctionCall, CallType, 7154 CheckedVarArgs, UncoveredArg, 7155 IgnoreStringsWithoutSpecifiers); 7156 return SLCT_CheckedLiteral; 7157 } 7158 7159 return SLCT_NotALiteral; 7160 } 7161 case Stmt::BinaryOperatorClass: { 7162 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 7163 7164 // A string literal + an int offset is still a string literal. 7165 if (BinOp->isAdditiveOp()) { 7166 Expr::EvalResult LResult, RResult; 7167 7168 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7169 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7170 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7171 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7172 7173 if (LIsInt != RIsInt) { 7174 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7175 7176 if (LIsInt) { 7177 if (BinOpKind == BO_Add) { 7178 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7179 E = BinOp->getRHS(); 7180 goto tryAgain; 7181 } 7182 } else { 7183 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7184 E = BinOp->getLHS(); 7185 goto tryAgain; 7186 } 7187 } 7188 } 7189 7190 return SLCT_NotALiteral; 7191 } 7192 case Stmt::UnaryOperatorClass: { 7193 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7194 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7195 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7196 Expr::EvalResult IndexResult; 7197 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7198 Expr::SE_NoSideEffects, 7199 S.isConstantEvaluated())) { 7200 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7201 /*RHS is int*/ true); 7202 E = ASE->getBase(); 7203 goto tryAgain; 7204 } 7205 } 7206 7207 return SLCT_NotALiteral; 7208 } 7209 7210 default: 7211 return SLCT_NotALiteral; 7212 } 7213 } 7214 7215 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7216 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7217 .Case("scanf", FST_Scanf) 7218 .Cases("printf", "printf0", FST_Printf) 7219 .Cases("NSString", "CFString", FST_NSString) 7220 .Case("strftime", FST_Strftime) 7221 .Case("strfmon", FST_Strfmon) 7222 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7223 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7224 .Case("os_trace", FST_OSLog) 7225 .Case("os_log", FST_OSLog) 7226 .Default(FST_Unknown); 7227 } 7228 7229 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7230 /// functions) for correct use of format strings. 7231 /// Returns true if a format string has been fully checked. 
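// Illustrative outcomes (added, not from the original comment):
//   printf("%d", x);   // literal format string: checked, returns true
//   printf(fmt, x);    // non-literal: -Wformat-nonliteral, returns false
//   printf(fmt);       // non-literal and no data arguments:
//                      // -Wformat-security, returns false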
7232 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7233 ArrayRef<const Expr *> Args, 7234 bool IsCXXMember, 7235 VariadicCallType CallType, 7236 SourceLocation Loc, SourceRange Range, 7237 llvm::SmallBitVector &CheckedVarArgs) { 7238 FormatStringInfo FSI; 7239 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7240 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7241 FSI.FirstDataArg, GetFormatStringType(Format), 7242 CallType, Loc, Range, CheckedVarArgs); 7243 return false; 7244 } 7245 7246 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7247 bool HasVAListArg, unsigned format_idx, 7248 unsigned firstDataArg, FormatStringType Type, 7249 VariadicCallType CallType, 7250 SourceLocation Loc, SourceRange Range, 7251 llvm::SmallBitVector &CheckedVarArgs) { 7252 // CHECK: printf/scanf-like function is called with no format string. 7253 if (format_idx >= Args.size()) { 7254 Diag(Loc, diag::warn_missing_format_string) << Range; 7255 return false; 7256 } 7257 7258 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7259 7260 // CHECK: format string is not a string literal. 7261 // 7262 // Dynamically generated format strings are difficult to 7263 // automatically vet at compile time. Requiring that format strings 7264 // are string literals: (1) permits the checking of format strings by 7265 // the compiler and thereby (2) can practically remove the source of 7266 // many format string exploits. 7267 7268 // Format string can be either ObjC string (e.g. @"%d") or 7269 // C string (e.g. "%d") 7270 // ObjC string uses the same format specifiers as C string, so we can use 7271 // the same format string checking logic for both ObjC and C strings. 7272 UncoveredArgHandler UncoveredArg; 7273 StringLiteralCheckType CT = 7274 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7275 format_idx, firstDataArg, Type, CallType, 7276 /*IsFunctionCall*/ true, CheckedVarArgs, 7277 UncoveredArg, 7278 /*no string offset*/ llvm::APSInt(64, false) = 0); 7279 7280 // Generate a diagnostic where an uncovered argument is detected. 7281 if (UncoveredArg.hasUncoveredArg()) { 7282 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7283 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7284 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7285 } 7286 7287 if (CT != SLCT_NotALiteral) 7288 // Literal format string found, check done! 7289 return CT == SLCT_CheckedLiteral; 7290 7291 // Strftime is particular as it always uses a single 'time' argument, 7292 // so it is safe to pass a non-literal string. 7293 if (Type == FST_Strftime) 7294 return false; 7295 7296 // Do not emit diag when the string param is a macro expansion and the 7297 // format is either NSString or CFString. This is a hack to prevent 7298 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7299 // which are usually used in place of NS and CF string literals. 7300 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7301 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7302 return false; 7303 7304 // If there are no arguments specified, warn with -Wformat-security, otherwise 7305 // warn only with -Wformat-nonliteral. 
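// (Added illustration: for 'printf(s);' the fix-it below suggests rewriting to
// 'printf("%s", s);'; for NSString-style formats the suggested insertion is
// '@"%@", '.)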
7306 if (Args.size() == firstDataArg) { 7307 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7308 << OrigFormatExpr->getSourceRange(); 7309 switch (Type) { 7310 default: 7311 break; 7312 case FST_Kprintf: 7313 case FST_FreeBSDKPrintf: 7314 case FST_Printf: 7315 Diag(FormatLoc, diag::note_format_security_fixit) 7316 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7317 break; 7318 case FST_NSString: 7319 Diag(FormatLoc, diag::note_format_security_fixit) 7320 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7321 break; 7322 } 7323 } else { 7324 Diag(FormatLoc, diag::warn_format_nonliteral) 7325 << OrigFormatExpr->getSourceRange(); 7326 } 7327 return false; 7328 } 7329 7330 namespace { 7331 7332 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7333 protected: 7334 Sema &S; 7335 const FormatStringLiteral *FExpr; 7336 const Expr *OrigFormatExpr; 7337 const Sema::FormatStringType FSType; 7338 const unsigned FirstDataArg; 7339 const unsigned NumDataArgs; 7340 const char *Beg; // Start of format string. 7341 const bool HasVAListArg; 7342 ArrayRef<const Expr *> Args; 7343 unsigned FormatIdx; 7344 llvm::SmallBitVector CoveredArgs; 7345 bool usesPositionalArgs = false; 7346 bool atFirstArg = true; 7347 bool inFunctionCall; 7348 Sema::VariadicCallType CallType; 7349 llvm::SmallBitVector &CheckedVarArgs; 7350 UncoveredArgHandler &UncoveredArg; 7351 7352 public: 7353 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7354 const Expr *origFormatExpr, 7355 const Sema::FormatStringType type, unsigned firstDataArg, 7356 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7357 ArrayRef<const Expr *> Args, unsigned formatIdx, 7358 bool inFunctionCall, Sema::VariadicCallType callType, 7359 llvm::SmallBitVector &CheckedVarArgs, 7360 UncoveredArgHandler &UncoveredArg) 7361 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7362 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7363 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7364 inFunctionCall(inFunctionCall), CallType(callType), 7365 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7366 CoveredArgs.resize(numDataArgs); 7367 CoveredArgs.reset(); 7368 } 7369 7370 void DoneProcessing(); 7371 7372 void HandleIncompleteSpecifier(const char *startSpecifier, 7373 unsigned specifierLen) override; 7374 7375 void HandleInvalidLengthModifier( 7376 const analyze_format_string::FormatSpecifier &FS, 7377 const analyze_format_string::ConversionSpecifier &CS, 7378 const char *startSpecifier, unsigned specifierLen, 7379 unsigned DiagID); 7380 7381 void HandleNonStandardLengthModifier( 7382 const analyze_format_string::FormatSpecifier &FS, 7383 const char *startSpecifier, unsigned specifierLen); 7384 7385 void HandleNonStandardConversionSpecifier( 7386 const analyze_format_string::ConversionSpecifier &CS, 7387 const char *startSpecifier, unsigned specifierLen); 7388 7389 void HandlePosition(const char *startPos, unsigned posLen) override; 7390 7391 void HandleInvalidPosition(const char *startSpecifier, 7392 unsigned specifierLen, 7393 analyze_format_string::PositionContext p) override; 7394 7395 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7396 7397 void HandleNullChar(const char *nullCharacter) override; 7398 7399 template <typename Range> 7400 static void 7401 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7402 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7403 bool IsStringLocation, 
Range StringRange, 7404 ArrayRef<FixItHint> Fixit = None); 7405 7406 protected: 7407 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7408 const char *startSpec, 7409 unsigned specifierLen, 7410 const char *csStart, unsigned csLen); 7411 7412 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7413 const char *startSpec, 7414 unsigned specifierLen); 7415 7416 SourceRange getFormatStringRange(); 7417 CharSourceRange getSpecifierRange(const char *startSpecifier, 7418 unsigned specifierLen); 7419 SourceLocation getLocationOfByte(const char *x); 7420 7421 const Expr *getDataArg(unsigned i) const; 7422 7423 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7424 const analyze_format_string::ConversionSpecifier &CS, 7425 const char *startSpecifier, unsigned specifierLen, 7426 unsigned argIndex); 7427 7428 template <typename Range> 7429 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7430 bool IsStringLocation, Range StringRange, 7431 ArrayRef<FixItHint> Fixit = None); 7432 }; 7433 7434 } // namespace 7435 7436 SourceRange CheckFormatHandler::getFormatStringRange() { 7437 return OrigFormatExpr->getSourceRange(); 7438 } 7439 7440 CharSourceRange CheckFormatHandler:: 7441 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7442 SourceLocation Start = getLocationOfByte(startSpecifier); 7443 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7444 7445 // Advance the end SourceLocation by one due to half-open ranges. 7446 End = End.getLocWithOffset(1); 7447 7448 return CharSourceRange::getCharRange(Start, End); 7449 } 7450 7451 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7452 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7453 S.getLangOpts(), S.Context.getTargetInfo()); 7454 } 7455 7456 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7457 unsigned specifierLen){ 7458 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7459 getLocationOfByte(startSpecifier), 7460 /*IsStringLocation*/true, 7461 getSpecifierRange(startSpecifier, specifierLen)); 7462 } 7463 7464 void CheckFormatHandler::HandleInvalidLengthModifier( 7465 const analyze_format_string::FormatSpecifier &FS, 7466 const analyze_format_string::ConversionSpecifier &CS, 7467 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7468 using namespace analyze_format_string; 7469 7470 const LengthModifier &LM = FS.getLengthModifier(); 7471 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7472 7473 // See if we know how to fix this length modifier. 
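  // For illustration: for something like printf("%hf", 1.0), there is
  // typically no corrected length modifier, so the nonsensical 'h' is
  // diagnosed below with a removal fix-it; when a correction is known, a
  // note with a replacement fix-it is emitted instead.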
7474 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7475 if (FixedLM) { 7476 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7477 getLocationOfByte(LM.getStart()), 7478 /*IsStringLocation*/true, 7479 getSpecifierRange(startSpecifier, specifierLen)); 7480 7481 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7482 << FixedLM->toString() 7483 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7484 7485 } else { 7486 FixItHint Hint; 7487 if (DiagID == diag::warn_format_nonsensical_length) 7488 Hint = FixItHint::CreateRemoval(LMRange); 7489 7490 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7491 getLocationOfByte(LM.getStart()), 7492 /*IsStringLocation*/true, 7493 getSpecifierRange(startSpecifier, specifierLen), 7494 Hint); 7495 } 7496 } 7497 7498 void CheckFormatHandler::HandleNonStandardLengthModifier( 7499 const analyze_format_string::FormatSpecifier &FS, 7500 const char *startSpecifier, unsigned specifierLen) { 7501 using namespace analyze_format_string; 7502 7503 const LengthModifier &LM = FS.getLengthModifier(); 7504 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7505 7506 // See if we know how to fix this length modifier. 7507 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7508 if (FixedLM) { 7509 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7510 << LM.toString() << 0, 7511 getLocationOfByte(LM.getStart()), 7512 /*IsStringLocation*/true, 7513 getSpecifierRange(startSpecifier, specifierLen)); 7514 7515 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7516 << FixedLM->toString() 7517 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7518 7519 } else { 7520 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7521 << LM.toString() << 0, 7522 getLocationOfByte(LM.getStart()), 7523 /*IsStringLocation*/true, 7524 getSpecifierRange(startSpecifier, specifierLen)); 7525 } 7526 } 7527 7528 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7529 const analyze_format_string::ConversionSpecifier &CS, 7530 const char *startSpecifier, unsigned specifierLen) { 7531 using namespace analyze_format_string; 7532 7533 // See if we know how to fix this conversion specifier. 
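  // For illustration: a legacy conversion specifier such as "%D" is outside
  // the C standard; when a standard spelling is known, a note with a
  // replacement fix-it is attached below, otherwise only the non-standard
  // warning itself is emitted.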
7534 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7535 if (FixedCS) { 7536 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7537 << CS.toString() << /*conversion specifier*/1, 7538 getLocationOfByte(CS.getStart()), 7539 /*IsStringLocation*/true, 7540 getSpecifierRange(startSpecifier, specifierLen)); 7541 7542 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7543 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7544 << FixedCS->toString() 7545 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7546 } else { 7547 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7548 << CS.toString() << /*conversion specifier*/1, 7549 getLocationOfByte(CS.getStart()), 7550 /*IsStringLocation*/true, 7551 getSpecifierRange(startSpecifier, specifierLen)); 7552 } 7553 } 7554 7555 void CheckFormatHandler::HandlePosition(const char *startPos, 7556 unsigned posLen) { 7557 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7558 getLocationOfByte(startPos), 7559 /*IsStringLocation*/true, 7560 getSpecifierRange(startPos, posLen)); 7561 } 7562 7563 void 7564 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7565 analyze_format_string::PositionContext p) { 7566 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7567 << (unsigned) p, 7568 getLocationOfByte(startPos), /*IsStringLocation*/true, 7569 getSpecifierRange(startPos, posLen)); 7570 } 7571 7572 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7573 unsigned posLen) { 7574 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7575 getLocationOfByte(startPos), 7576 /*IsStringLocation*/true, 7577 getSpecifierRange(startPos, posLen)); 7578 } 7579 7580 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7581 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7582 // The presence of a null character is likely an error. 7583 EmitFormatDiagnostic( 7584 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7585 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7586 getFormatStringRange()); 7587 } 7588 } 7589 7590 // Note that this may return NULL if there was an error parsing or building 7591 // one of the argument expressions. 7592 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7593 return Args[FirstDataArg + i]; 7594 } 7595 7596 void CheckFormatHandler::DoneProcessing() { 7597 // Does the number of data arguments exceed the number of 7598 // format conversions in the format string? 7599 if (!HasVAListArg) { 7600 // Find any arguments that weren't covered. 
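    // For illustration: printf("%d\n", x, y) leaves 'y' uncovered; the first
    // such argument is recorded here and later reported as a data argument
    // not used by the format string.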
7601     CoveredArgs.flip();
7602     signed notCoveredArg = CoveredArgs.find_first();
7603     if (notCoveredArg >= 0) {
7604       assert((unsigned)notCoveredArg < NumDataArgs);
7605       UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
7606     } else {
7607       UncoveredArg.setAllCovered();
7608     }
7609   }
7610 }
7611
7612 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
7613                                    const Expr *ArgExpr) {
7614   assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
7615          "Invalid state");
7616
7617   if (!ArgExpr)
7618     return;
7619
7620   SourceLocation Loc = ArgExpr->getBeginLoc();
7621
7622   if (S.getSourceManager().isInSystemMacro(Loc))
7623     return;
7624
7625   PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
7626   for (auto E : DiagnosticExprs)
7627     PDiag << E->getSourceRange();
7628
7629   CheckFormatHandler::EmitFormatDiagnostic(
7630       S, IsFunctionCall, DiagnosticExprs[0],
7631       PDiag, Loc, /*IsStringLocation*/false,
7632       DiagnosticExprs[0]->getSourceRange());
7633 }
7634
7635 bool
7636 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
7637                                                      SourceLocation Loc,
7638                                                      const char *startSpec,
7639                                                      unsigned specifierLen,
7640                                                      const char *csStart,
7641                                                      unsigned csLen) {
7642   bool keepGoing = true;
7643   if (argIndex < NumDataArgs) {
7644     // Consider the argument covered, even though the specifier doesn't
7645     // make sense.
7646     CoveredArgs.set(argIndex);
7647   }
7648   else {
7649     // If argIndex exceeds the number of data arguments we
7650     // don't issue a warning because that is just a cascade of warnings (and
7651     // they may have intended '%%' anyway). We don't want to continue processing
7652     // the format string after this point, however, as we will likely just get
7653     // gibberish when trying to match arguments.
7654     keepGoing = false;
7655   }
7656
7657   StringRef Specifier(csStart, csLen);
7658
7659   // If the specifier is non-printable, it could be the first byte of a UTF-8
7660   // sequence. In that case, print the UTF-8 code point. If not, print the byte
7661   // hex value.
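  // For illustration: a stray control byte such as '\x01' in the specifier is
  // reported as "\x01", while a valid UTF-8 sequence (e.g. the bytes of
  // U+2603) is decoded and reported as "\u2603".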
7662 std::string CodePointStr; 7663 if (!llvm::sys::locale::isPrint(*csStart)) { 7664 llvm::UTF32 CodePoint; 7665 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7666 const llvm::UTF8 *E = 7667 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7668 llvm::ConversionResult Result = 7669 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7670 7671 if (Result != llvm::conversionOK) { 7672 unsigned char FirstChar = *csStart; 7673 CodePoint = (llvm::UTF32)FirstChar; 7674 } 7675 7676 llvm::raw_string_ostream OS(CodePointStr); 7677 if (CodePoint < 256) 7678 OS << "\\x" << llvm::format("%02x", CodePoint); 7679 else if (CodePoint <= 0xFFFF) 7680 OS << "\\u" << llvm::format("%04x", CodePoint); 7681 else 7682 OS << "\\U" << llvm::format("%08x", CodePoint); 7683 OS.flush(); 7684 Specifier = CodePointStr; 7685 } 7686 7687 EmitFormatDiagnostic( 7688 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7689 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7690 7691 return keepGoing; 7692 } 7693 7694 void 7695 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7696 const char *startSpec, 7697 unsigned specifierLen) { 7698 EmitFormatDiagnostic( 7699 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7700 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7701 } 7702 7703 bool 7704 CheckFormatHandler::CheckNumArgs( 7705 const analyze_format_string::FormatSpecifier &FS, 7706 const analyze_format_string::ConversionSpecifier &CS, 7707 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7708 7709 if (argIndex >= NumDataArgs) { 7710 PartialDiagnostic PDiag = FS.usesPositionalArg() 7711 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7712 << (argIndex+1) << NumDataArgs) 7713 : S.PDiag(diag::warn_printf_insufficient_data_args); 7714 EmitFormatDiagnostic( 7715 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7716 getSpecifierRange(startSpecifier, specifierLen)); 7717 7718 // Since more arguments than conversion tokens are given, by extension 7719 // all arguments are covered, so mark this as so. 7720 UncoveredArg.setAllCovered(); 7721 return false; 7722 } 7723 return true; 7724 } 7725 7726 template<typename Range> 7727 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7728 SourceLocation Loc, 7729 bool IsStringLocation, 7730 Range StringRange, 7731 ArrayRef<FixItHint> FixIt) { 7732 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7733 Loc, IsStringLocation, StringRange, FixIt); 7734 } 7735 7736 /// If the format string is not within the function call, emit a note 7737 /// so that the function call and string are in diagnostic messages. 7738 /// 7739 /// \param InFunctionCall if true, the format string is within the function 7740 /// call and only one diagnostic message will be produced. Otherwise, an 7741 /// extra note will be emitted pointing to location of the format string. 7742 /// 7743 /// \param ArgumentExpr the expression that is passed as the format string 7744 /// argument in the function call. Used for getting locations when two 7745 /// diagnostics are emitted. 7746 /// 7747 /// \param PDiag the callee should already have provided any strings for the 7748 /// diagnostic message. This function only adds locations and fixits 7749 /// to diagnostics. 7750 /// 7751 /// \param Loc primary location for diagnostic. 
If two diagnostics are 7752 /// required, one will be at Loc and a new SourceLocation will be created for 7753 /// the other one. 7754 /// 7755 /// \param IsStringLocation if true, Loc points to the format string should be 7756 /// used for the note. Otherwise, Loc points to the argument list and will 7757 /// be used with PDiag. 7758 /// 7759 /// \param StringRange some or all of the string to highlight. This is 7760 /// templated so it can accept either a CharSourceRange or a SourceRange. 7761 /// 7762 /// \param FixIt optional fix it hint for the format string. 7763 template <typename Range> 7764 void CheckFormatHandler::EmitFormatDiagnostic( 7765 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 7766 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 7767 Range StringRange, ArrayRef<FixItHint> FixIt) { 7768 if (InFunctionCall) { 7769 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 7770 D << StringRange; 7771 D << FixIt; 7772 } else { 7773 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 7774 << ArgumentExpr->getSourceRange(); 7775 7776 const Sema::SemaDiagnosticBuilder &Note = 7777 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 7778 diag::note_format_string_defined); 7779 7780 Note << StringRange; 7781 Note << FixIt; 7782 } 7783 } 7784 7785 //===--- CHECK: Printf format string checking ------------------------------===// 7786 7787 namespace { 7788 7789 class CheckPrintfHandler : public CheckFormatHandler { 7790 public: 7791 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 7792 const Expr *origFormatExpr, 7793 const Sema::FormatStringType type, unsigned firstDataArg, 7794 unsigned numDataArgs, bool isObjC, const char *beg, 7795 bool hasVAListArg, ArrayRef<const Expr *> Args, 7796 unsigned formatIdx, bool inFunctionCall, 7797 Sema::VariadicCallType CallType, 7798 llvm::SmallBitVector &CheckedVarArgs, 7799 UncoveredArgHandler &UncoveredArg) 7800 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 7801 numDataArgs, beg, hasVAListArg, Args, formatIdx, 7802 inFunctionCall, CallType, CheckedVarArgs, 7803 UncoveredArg) {} 7804 7805 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 7806 7807 /// Returns true if '%@' specifiers are allowed in the format string. 
7808 bool allowsObjCArg() const { 7809 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7810 FSType == Sema::FST_OSTrace; 7811 } 7812 7813 bool HandleInvalidPrintfConversionSpecifier( 7814 const analyze_printf::PrintfSpecifier &FS, 7815 const char *startSpecifier, 7816 unsigned specifierLen) override; 7817 7818 void handleInvalidMaskType(StringRef MaskType) override; 7819 7820 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7821 const char *startSpecifier, 7822 unsigned specifierLen) override; 7823 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7824 const char *StartSpecifier, 7825 unsigned SpecifierLen, 7826 const Expr *E); 7827 7828 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7829 const char *startSpecifier, unsigned specifierLen); 7830 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7831 const analyze_printf::OptionalAmount &Amt, 7832 unsigned type, 7833 const char *startSpecifier, unsigned specifierLen); 7834 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7835 const analyze_printf::OptionalFlag &flag, 7836 const char *startSpecifier, unsigned specifierLen); 7837 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7838 const analyze_printf::OptionalFlag &ignoredFlag, 7839 const analyze_printf::OptionalFlag &flag, 7840 const char *startSpecifier, unsigned specifierLen); 7841 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7842 const Expr *E); 7843 7844 void HandleEmptyObjCModifierFlag(const char *startFlag, 7845 unsigned flagLen) override; 7846 7847 void HandleInvalidObjCModifierFlag(const char *startFlag, 7848 unsigned flagLen) override; 7849 7850 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7851 const char *flagsEnd, 7852 const char *conversionPosition) 7853 override; 7854 }; 7855 7856 } // namespace 7857 7858 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7859 const analyze_printf::PrintfSpecifier &FS, 7860 const char *startSpecifier, 7861 unsigned specifierLen) { 7862 const analyze_printf::PrintfConversionSpecifier &CS = 7863 FS.getConversionSpecifier(); 7864 7865 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7866 getLocationOfByte(CS.getStart()), 7867 startSpecifier, specifierLen, 7868 CS.getStart(), CS.getLength()); 7869 } 7870 7871 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7872 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7873 } 7874 7875 bool CheckPrintfHandler::HandleAmount( 7876 const analyze_format_string::OptionalAmount &Amt, 7877 unsigned k, const char *startSpecifier, 7878 unsigned specifierLen) { 7879 if (Amt.hasDataArgument()) { 7880 if (!HasVAListArg) { 7881 unsigned argIndex = Amt.getArgIndex(); 7882 if (argIndex >= NumDataArgs) { 7883 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7884 << k, 7885 getLocationOfByte(Amt.getStart()), 7886 /*IsStringLocation*/true, 7887 getSpecifierRange(startSpecifier, specifierLen)); 7888 // Don't do any more checking. We will just emit 7889 // spurious errors. 7890 return false; 7891 } 7892 7893 // Type check the data argument. It should be an 'int'. 7894 // Although not in conformance with C99, we also allow the argument to be 7895 // an 'unsigned int' as that is a reasonably safe case. GCC also 7896 // doesn't emit a warning for that case. 
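      // For illustration: in printf("%*d", width, x) the '*' field width
      // consumes 'width', which must be an 'int' (or, per the note above, an
      // 'unsigned int').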
7897 CoveredArgs.set(argIndex); 7898 const Expr *Arg = getDataArg(argIndex); 7899 if (!Arg) 7900 return false; 7901 7902 QualType T = Arg->getType(); 7903 7904 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7905 assert(AT.isValid()); 7906 7907 if (!AT.matchesType(S.Context, T)) { 7908 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7909 << k << AT.getRepresentativeTypeName(S.Context) 7910 << T << Arg->getSourceRange(), 7911 getLocationOfByte(Amt.getStart()), 7912 /*IsStringLocation*/true, 7913 getSpecifierRange(startSpecifier, specifierLen)); 7914 // Don't do any more checking. We will just emit 7915 // spurious errors. 7916 return false; 7917 } 7918 } 7919 } 7920 return true; 7921 } 7922 7923 void CheckPrintfHandler::HandleInvalidAmount( 7924 const analyze_printf::PrintfSpecifier &FS, 7925 const analyze_printf::OptionalAmount &Amt, 7926 unsigned type, 7927 const char *startSpecifier, 7928 unsigned specifierLen) { 7929 const analyze_printf::PrintfConversionSpecifier &CS = 7930 FS.getConversionSpecifier(); 7931 7932 FixItHint fixit = 7933 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7934 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7935 Amt.getConstantLength())) 7936 : FixItHint(); 7937 7938 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7939 << type << CS.toString(), 7940 getLocationOfByte(Amt.getStart()), 7941 /*IsStringLocation*/true, 7942 getSpecifierRange(startSpecifier, specifierLen), 7943 fixit); 7944 } 7945 7946 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7947 const analyze_printf::OptionalFlag &flag, 7948 const char *startSpecifier, 7949 unsigned specifierLen) { 7950 // Warn about pointless flag with a fixit removal. 7951 const analyze_printf::PrintfConversionSpecifier &CS = 7952 FS.getConversionSpecifier(); 7953 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7954 << flag.toString() << CS.toString(), 7955 getLocationOfByte(flag.getPosition()), 7956 /*IsStringLocation*/true, 7957 getSpecifierRange(startSpecifier, specifierLen), 7958 FixItHint::CreateRemoval( 7959 getSpecifierRange(flag.getPosition(), 1))); 7960 } 7961 7962 void CheckPrintfHandler::HandleIgnoredFlag( 7963 const analyze_printf::PrintfSpecifier &FS, 7964 const analyze_printf::OptionalFlag &ignoredFlag, 7965 const analyze_printf::OptionalFlag &flag, 7966 const char *startSpecifier, 7967 unsigned specifierLen) { 7968 // Warn about ignored flag with a fixit removal. 7969 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7970 << ignoredFlag.toString() << flag.toString(), 7971 getLocationOfByte(ignoredFlag.getPosition()), 7972 /*IsStringLocation*/true, 7973 getSpecifierRange(startSpecifier, specifierLen), 7974 FixItHint::CreateRemoval( 7975 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7976 } 7977 7978 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7979 unsigned flagLen) { 7980 // Warn about an empty flag. 7981 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7982 getLocationOfByte(startFlag), 7983 /*IsStringLocation*/true, 7984 getSpecifierRange(startFlag, flagLen)); 7985 } 7986 7987 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 7988 unsigned flagLen) { 7989 // Warn about an invalid flag. 
7990 auto Range = getSpecifierRange(startFlag, flagLen); 7991 StringRef flag(startFlag, flagLen); 7992 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 7993 getLocationOfByte(startFlag), 7994 /*IsStringLocation*/true, 7995 Range, FixItHint::CreateRemoval(Range)); 7996 } 7997 7998 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 7999 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 8000 // Warn about using '[...]' without a '@' conversion. 8001 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 8002 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 8003 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 8004 getLocationOfByte(conversionPosition), 8005 /*IsStringLocation*/true, 8006 Range, FixItHint::CreateRemoval(Range)); 8007 } 8008 8009 // Determines if the specified is a C++ class or struct containing 8010 // a member with the specified name and kind (e.g. a CXXMethodDecl named 8011 // "c_str()"). 8012 template<typename MemberKind> 8013 static llvm::SmallPtrSet<MemberKind*, 1> 8014 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 8015 const RecordType *RT = Ty->getAs<RecordType>(); 8016 llvm::SmallPtrSet<MemberKind*, 1> Results; 8017 8018 if (!RT) 8019 return Results; 8020 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 8021 if (!RD || !RD->getDefinition()) 8022 return Results; 8023 8024 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 8025 Sema::LookupMemberName); 8026 R.suppressDiagnostics(); 8027 8028 // We just need to include all members of the right kind turned up by the 8029 // filter, at this point. 8030 if (S.LookupQualifiedName(R, RT->getDecl())) 8031 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 8032 NamedDecl *decl = (*I)->getUnderlyingDecl(); 8033 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 8034 Results.insert(FK); 8035 } 8036 return Results; 8037 } 8038 8039 /// Check if we could call '.c_str()' on an object. 8040 /// 8041 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 8042 /// allow the call, or if it would be ambiguous). 8043 bool Sema::hasCStrMethod(const Expr *E) { 8044 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8045 8046 MethodSet Results = 8047 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 8048 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8049 MI != ME; ++MI) 8050 if ((*MI)->getMinRequiredArguments() == 0) 8051 return true; 8052 return false; 8053 } 8054 8055 // Check if a (w)string was passed when a (w)char* was needed, and offer a 8056 // better diagnostic if so. AT is assumed to be valid. 8057 // Returns true when a c_str() conversion method is found. 8058 bool CheckPrintfHandler::checkForCStrMembers( 8059 const analyze_printf::ArgType &AT, const Expr *E) { 8060 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8061 8062 MethodSet Results = 8063 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 8064 8065 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8066 MI != ME; ++MI) { 8067 const CXXMethodDecl *Method = *MI; 8068 if (Method->getMinRequiredArguments() == 0 && 8069 AT.matchesType(S.Context, Method->getReturnType())) { 8070 // FIXME: Suggest parens if the expression needs them. 
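      // For illustration: printf("%s", Str) with 'std::string Str' reaches
      // this point, and the note below suggests appending '.c_str()'.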
8071 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8072 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8073 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8074 return true; 8075 } 8076 } 8077 8078 return false; 8079 } 8080 8081 bool 8082 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8083 &FS, 8084 const char *startSpecifier, 8085 unsigned specifierLen) { 8086 using namespace analyze_format_string; 8087 using namespace analyze_printf; 8088 8089 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8090 8091 if (FS.consumesDataArgument()) { 8092 if (atFirstArg) { 8093 atFirstArg = false; 8094 usesPositionalArgs = FS.usesPositionalArg(); 8095 } 8096 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8097 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8098 startSpecifier, specifierLen); 8099 return false; 8100 } 8101 } 8102 8103 // First check if the field width, precision, and conversion specifier 8104 // have matching data arguments. 8105 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 8106 startSpecifier, specifierLen)) { 8107 return false; 8108 } 8109 8110 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 8111 startSpecifier, specifierLen)) { 8112 return false; 8113 } 8114 8115 if (!CS.consumesDataArgument()) { 8116 // FIXME: Technically specifying a precision or field width here 8117 // makes no sense. Worth issuing a warning at some point. 8118 return true; 8119 } 8120 8121 // Consume the argument. 8122 unsigned argIndex = FS.getArgIndex(); 8123 if (argIndex < NumDataArgs) { 8124 // The check to see if the argIndex is valid will come later. 8125 // We set the bit here because we may exit early from this 8126 // function if we encounter some other error. 8127 CoveredArgs.set(argIndex); 8128 } 8129 8130 // FreeBSD kernel extensions. 8131 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8132 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8133 // We need at least two arguments. 8134 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8135 return false; 8136 8137 // Claim the second argument. 8138 CoveredArgs.set(argIndex + 1); 8139 8140 // Type check the first argument (int for %b, pointer for %D) 8141 const Expr *Ex = getDataArg(argIndex); 8142 const analyze_printf::ArgType &AT = 8143 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8144 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8145 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8146 EmitFormatDiagnostic( 8147 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8148 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8149 << false << Ex->getSourceRange(), 8150 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8151 getSpecifierRange(startSpecifier, specifierLen)); 8152 8153 // Type check the second argument (char * for both %b and %D) 8154 Ex = getDataArg(argIndex + 1); 8155 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8156 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8157 EmitFormatDiagnostic( 8158 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8159 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8160 << false << Ex->getSourceRange(), 8161 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8162 getSpecifierRange(startSpecifier, specifierLen)); 8163 8164 return true; 8165 } 8166 8167 // Check for using an Objective-C specific conversion specifier 8168 // in a non-ObjC literal. 
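  // For illustration: using "%@" with a plain printf format (rather than an
  // NSString/os_log/os_trace format) lands here and is reported as an invalid
  // conversion specifier.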
8169 if (!allowsObjCArg() && CS.isObjCArg()) { 8170 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8171 specifierLen); 8172 } 8173 8174 // %P can only be used with os_log. 8175 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8176 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8177 specifierLen); 8178 } 8179 8180 // %n is not allowed with os_log. 8181 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8182 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8183 getLocationOfByte(CS.getStart()), 8184 /*IsStringLocation*/ false, 8185 getSpecifierRange(startSpecifier, specifierLen)); 8186 8187 return true; 8188 } 8189 8190 // Only scalars are allowed for os_trace. 8191 if (FSType == Sema::FST_OSTrace && 8192 (CS.getKind() == ConversionSpecifier::PArg || 8193 CS.getKind() == ConversionSpecifier::sArg || 8194 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8195 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8196 specifierLen); 8197 } 8198 8199 // Check for use of public/private annotation outside of os_log(). 8200 if (FSType != Sema::FST_OSLog) { 8201 if (FS.isPublic().isSet()) { 8202 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8203 << "public", 8204 getLocationOfByte(FS.isPublic().getPosition()), 8205 /*IsStringLocation*/ false, 8206 getSpecifierRange(startSpecifier, specifierLen)); 8207 } 8208 if (FS.isPrivate().isSet()) { 8209 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8210 << "private", 8211 getLocationOfByte(FS.isPrivate().getPosition()), 8212 /*IsStringLocation*/ false, 8213 getSpecifierRange(startSpecifier, specifierLen)); 8214 } 8215 } 8216 8217 // Check for invalid use of field width 8218 if (!FS.hasValidFieldWidth()) { 8219 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8220 startSpecifier, specifierLen); 8221 } 8222 8223 // Check for invalid use of precision 8224 if (!FS.hasValidPrecision()) { 8225 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8226 startSpecifier, specifierLen); 8227 } 8228 8229 // Precision is mandatory for %P specifier. 8230 if (CS.getKind() == ConversionSpecifier::PArg && 8231 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8232 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8233 getLocationOfByte(startSpecifier), 8234 /*IsStringLocation*/ false, 8235 getSpecifierRange(startSpecifier, specifierLen)); 8236 } 8237 8238 // Check each flag does not conflict with any other component. 
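  // For illustration: printf("%+s", s) uses '+' where it has no effect and is
  // reported via HandleFlag, while printf("%0-8d", x) has '0' ignored by '-'
  // and is reported via HandleIgnoredFlag.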
8239 if (!FS.hasValidThousandsGroupingPrefix()) 8240 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8241 if (!FS.hasValidLeadingZeros()) 8242 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8243 if (!FS.hasValidPlusPrefix()) 8244 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8245 if (!FS.hasValidSpacePrefix()) 8246 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8247 if (!FS.hasValidAlternativeForm()) 8248 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8249 if (!FS.hasValidLeftJustified()) 8250 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8251 8252 // Check that flags are not ignored by another flag 8253 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8254 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8255 startSpecifier, specifierLen); 8256 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8257 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8258 startSpecifier, specifierLen); 8259 8260 // Check the length modifier is valid with the given conversion specifier. 8261 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8262 S.getLangOpts())) 8263 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8264 diag::warn_format_nonsensical_length); 8265 else if (!FS.hasStandardLengthModifier()) 8266 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8267 else if (!FS.hasStandardLengthConversionCombination()) 8268 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8269 diag::warn_format_non_standard_conversion_spec); 8270 8271 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8272 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8273 8274 // The remaining checks depend on the data arguments. 8275 if (HasVAListArg) 8276 return true; 8277 8278 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8279 return false; 8280 8281 const Expr *Arg = getDataArg(argIndex); 8282 if (!Arg) 8283 return true; 8284 8285 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8286 } 8287 8288 static bool requiresParensToAddCast(const Expr *E) { 8289 // FIXME: We should have a general way to reason about operator 8290 // precedence and whether parens are actually needed here. 8291 // Take care of a few common cases where they aren't. 
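  // For illustration: a cast fix-it applied to 'x + 1' must produce
  // "(long)(x + 1)", whereas a call or a plain variable can be cast directly,
  // e.g. "(long)f()".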
8292 const Expr *Inside = E->IgnoreImpCasts(); 8293 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8294 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8295 8296 switch (Inside->getStmtClass()) { 8297 case Stmt::ArraySubscriptExprClass: 8298 case Stmt::CallExprClass: 8299 case Stmt::CharacterLiteralClass: 8300 case Stmt::CXXBoolLiteralExprClass: 8301 case Stmt::DeclRefExprClass: 8302 case Stmt::FloatingLiteralClass: 8303 case Stmt::IntegerLiteralClass: 8304 case Stmt::MemberExprClass: 8305 case Stmt::ObjCArrayLiteralClass: 8306 case Stmt::ObjCBoolLiteralExprClass: 8307 case Stmt::ObjCBoxedExprClass: 8308 case Stmt::ObjCDictionaryLiteralClass: 8309 case Stmt::ObjCEncodeExprClass: 8310 case Stmt::ObjCIvarRefExprClass: 8311 case Stmt::ObjCMessageExprClass: 8312 case Stmt::ObjCPropertyRefExprClass: 8313 case Stmt::ObjCStringLiteralClass: 8314 case Stmt::ObjCSubscriptRefExprClass: 8315 case Stmt::ParenExprClass: 8316 case Stmt::StringLiteralClass: 8317 case Stmt::UnaryOperatorClass: 8318 return false; 8319 default: 8320 return true; 8321 } 8322 } 8323 8324 static std::pair<QualType, StringRef> 8325 shouldNotPrintDirectly(const ASTContext &Context, 8326 QualType IntendedTy, 8327 const Expr *E) { 8328 // Use a 'while' to peel off layers of typedefs. 8329 QualType TyTy = IntendedTy; 8330 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8331 StringRef Name = UserTy->getDecl()->getName(); 8332 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8333 .Case("CFIndex", Context.getNSIntegerType()) 8334 .Case("NSInteger", Context.getNSIntegerType()) 8335 .Case("NSUInteger", Context.getNSUIntegerType()) 8336 .Case("SInt32", Context.IntTy) 8337 .Case("UInt32", Context.UnsignedIntTy) 8338 .Default(QualType()); 8339 8340 if (!CastTy.isNull()) 8341 return std::make_pair(CastTy, Name); 8342 8343 TyTy = UserTy->desugar(); 8344 } 8345 8346 // Strip parens if necessary. 8347 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8348 return shouldNotPrintDirectly(Context, 8349 PE->getSubExpr()->getType(), 8350 PE->getSubExpr()); 8351 8352 // If this is a conditional expression, then its result type is constructed 8353 // via usual arithmetic conversions and thus there might be no necessary 8354 // typedef sugar there. Recurse to operands to check for NSInteger & 8355 // Co. usage condition. 8356 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8357 QualType TrueTy, FalseTy; 8358 StringRef TrueName, FalseName; 8359 8360 std::tie(TrueTy, TrueName) = 8361 shouldNotPrintDirectly(Context, 8362 CO->getTrueExpr()->getType(), 8363 CO->getTrueExpr()); 8364 std::tie(FalseTy, FalseName) = 8365 shouldNotPrintDirectly(Context, 8366 CO->getFalseExpr()->getType(), 8367 CO->getFalseExpr()); 8368 8369 if (TrueTy == FalseTy) 8370 return std::make_pair(TrueTy, TrueName); 8371 else if (TrueTy.isNull()) 8372 return std::make_pair(FalseTy, FalseName); 8373 else if (FalseTy.isNull()) 8374 return std::make_pair(TrueTy, TrueName); 8375 } 8376 8377 return std::make_pair(QualType(), StringRef()); 8378 } 8379 8380 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8381 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8382 /// type do not count. 8383 static bool 8384 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8385 QualType From = ICE->getSubExpr()->getType(); 8386 QualType To = ICE->getType(); 8387 // It's an integer promotion if the destination type is the promoted 8388 // source type. 
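  // For illustration: in printf("%hd", s) with 'short s' the argument is
  // promoted to 'int', and in printf("%f", f) with 'float f' it is promoted
  // to 'double'; both implicit casts count as promotions here.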
8389 if (ICE->getCastKind() == CK_IntegralCast && 8390 From->isPromotableIntegerType() && 8391 S.Context.getPromotedIntegerType(From) == To) 8392 return true; 8393 // Look through vector types, since we do default argument promotion for 8394 // those in OpenCL. 8395 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8396 From = VecTy->getElementType(); 8397 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8398 To = VecTy->getElementType(); 8399 // It's a floating promotion if the source type is a lower rank. 8400 return ICE->getCastKind() == CK_FloatingCast && 8401 S.Context.getFloatingTypeOrder(From, To) < 0; 8402 } 8403 8404 bool 8405 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8406 const char *StartSpecifier, 8407 unsigned SpecifierLen, 8408 const Expr *E) { 8409 using namespace analyze_format_string; 8410 using namespace analyze_printf; 8411 8412 // Now type check the data expression that matches the 8413 // format specifier. 8414 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8415 if (!AT.isValid()) 8416 return true; 8417 8418 QualType ExprTy = E->getType(); 8419 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8420 ExprTy = TET->getUnderlyingExpr()->getType(); 8421 } 8422 8423 // Diagnose attempts to print a boolean value as a character. Unlike other 8424 // -Wformat diagnostics, this is fine from a type perspective, but it still 8425 // doesn't make sense. 8426 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8427 E->isKnownToHaveBooleanValue()) { 8428 const CharSourceRange &CSR = 8429 getSpecifierRange(StartSpecifier, SpecifierLen); 8430 SmallString<4> FSString; 8431 llvm::raw_svector_ostream os(FSString); 8432 FS.toString(os); 8433 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8434 << FSString, 8435 E->getExprLoc(), false, CSR); 8436 return true; 8437 } 8438 8439 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8440 if (Match == analyze_printf::ArgType::Match) 8441 return true; 8442 8443 // Look through argument promotions for our error message's reported type. 8444 // This includes the integral and floating promotions, but excludes array 8445 // and function pointer decay (seeing that an argument intended to be a 8446 // string has type 'char [6]' is probably more confusing than 'char *') and 8447 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8448 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8449 if (isArithmeticArgumentPromotion(S, ICE)) { 8450 E = ICE->getSubExpr(); 8451 ExprTy = E->getType(); 8452 8453 // Check if we didn't match because of an implicit cast from a 'char' 8454 // or 'short' to an 'int'. This is done because printf is a varargs 8455 // function. 8456 if (ICE->getType() == S.Context.IntTy || 8457 ICE->getType() == S.Context.UnsignedIntTy) { 8458 // All further checking is done on the subexpression 8459 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8460 AT.matchesType(S.Context, ExprTy); 8461 if (ImplicitMatch == analyze_printf::ArgType::Match) 8462 return true; 8463 if (ImplicitMatch == ArgType::NoMatchPedantic || 8464 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8465 Match = ImplicitMatch; 8466 } 8467 } 8468 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8469 // Special case for 'a', which has type 'int' in C. 
8470 // Note, however, that we do /not/ want to treat multibyte constants like 8471 // 'MooV' as characters! This form is deprecated but still exists. 8472 if (ExprTy == S.Context.IntTy) 8473 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8474 ExprTy = S.Context.CharTy; 8475 } 8476 8477 // Look through enums to their underlying type. 8478 bool IsEnum = false; 8479 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8480 ExprTy = EnumTy->getDecl()->getIntegerType(); 8481 IsEnum = true; 8482 } 8483 8484 // %C in an Objective-C context prints a unichar, not a wchar_t. 8485 // If the argument is an integer of some kind, believe the %C and suggest 8486 // a cast instead of changing the conversion specifier. 8487 QualType IntendedTy = ExprTy; 8488 if (isObjCContext() && 8489 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8490 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8491 !ExprTy->isCharType()) { 8492 // 'unichar' is defined as a typedef of unsigned short, but we should 8493 // prefer using the typedef if it is visible. 8494 IntendedTy = S.Context.UnsignedShortTy; 8495 8496 // While we are here, check if the value is an IntegerLiteral that happens 8497 // to be within the valid range. 8498 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8499 const llvm::APInt &V = IL->getValue(); 8500 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8501 return true; 8502 } 8503 8504 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8505 Sema::LookupOrdinaryName); 8506 if (S.LookupName(Result, S.getCurScope())) { 8507 NamedDecl *ND = Result.getFoundDecl(); 8508 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8509 if (TD->getUnderlyingType() == IntendedTy) 8510 IntendedTy = S.Context.getTypedefType(TD); 8511 } 8512 } 8513 } 8514 8515 // Special-case some of Darwin's platform-independence types by suggesting 8516 // casts to primitive types that are known to be large enough. 8517 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8518 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8519 QualType CastTy; 8520 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8521 if (!CastTy.isNull()) { 8522 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8523 // (long in ASTContext). Only complain to pedants. 8524 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8525 (AT.isSizeT() || AT.isPtrdiffT()) && 8526 AT.matchesType(S.Context, CastTy)) 8527 Match = ArgType::NoMatchPedantic; 8528 IntendedTy = CastTy; 8529 ShouldNotPrintDirectly = true; 8530 } 8531 } 8532 8533 // We may be able to offer a FixItHint if it is a supported type. 
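  // For illustration: for printf("%d", someLongValue) the specifier can be
  // fixed to "%ld", so the mismatch is reported with a replacement fix-it;
  // types that cannot be fixed fall through to the plain mismatch diagnostics
  // below.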
8534 PrintfSpecifier fixedFS = FS; 8535 bool Success = 8536 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8537 8538 if (Success) { 8539 // Get the fix string from the fixed format specifier 8540 SmallString<16> buf; 8541 llvm::raw_svector_ostream os(buf); 8542 fixedFS.toString(os); 8543 8544 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8545 8546 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8547 unsigned Diag; 8548 switch (Match) { 8549 case ArgType::Match: llvm_unreachable("expected non-matching"); 8550 case ArgType::NoMatchPedantic: 8551 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8552 break; 8553 case ArgType::NoMatchTypeConfusion: 8554 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8555 break; 8556 case ArgType::NoMatch: 8557 Diag = diag::warn_format_conversion_argument_type_mismatch; 8558 break; 8559 } 8560 8561 // In this case, the specifier is wrong and should be changed to match 8562 // the argument. 8563 EmitFormatDiagnostic(S.PDiag(Diag) 8564 << AT.getRepresentativeTypeName(S.Context) 8565 << IntendedTy << IsEnum << E->getSourceRange(), 8566 E->getBeginLoc(), 8567 /*IsStringLocation*/ false, SpecRange, 8568 FixItHint::CreateReplacement(SpecRange, os.str())); 8569 } else { 8570 // The canonical type for formatting this value is different from the 8571 // actual type of the expression. (This occurs, for example, with Darwin's 8572 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8573 // should be printed as 'long' for 64-bit compatibility.) 8574 // Rather than emitting a normal format/argument mismatch, we want to 8575 // add a cast to the recommended type (and correct the format string 8576 // if necessary). 8577 SmallString<16> CastBuf; 8578 llvm::raw_svector_ostream CastFix(CastBuf); 8579 CastFix << "("; 8580 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8581 CastFix << ")"; 8582 8583 SmallVector<FixItHint,4> Hints; 8584 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8585 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8586 8587 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8588 // If there's already a cast present, just replace it. 8589 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8590 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8591 8592 } else if (!requiresParensToAddCast(E)) { 8593 // If the expression has high enough precedence, 8594 // just write the C-style cast. 8595 Hints.push_back( 8596 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8597 } else { 8598 // Otherwise, add parens around the expression as well as the cast. 8599 CastFix << "("; 8600 Hints.push_back( 8601 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8602 8603 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8604 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8605 } 8606 8607 if (ShouldNotPrintDirectly) { 8608 // The expression has a type that should not be printed directly. 8609 // We extract the name from the typedef because we don't want to show 8610 // the underlying type in the diagnostic. 8611 StringRef Name; 8612 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8613 Name = TypedefTy->getDecl()->getName(); 8614 else 8615 Name = CastTyName; 8616 unsigned Diag = Match == ArgType::NoMatchPedantic 8617 ? 
diag::warn_format_argument_needs_cast_pedantic 8618 : diag::warn_format_argument_needs_cast; 8619 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8620 << E->getSourceRange(), 8621 E->getBeginLoc(), /*IsStringLocation=*/false, 8622 SpecRange, Hints); 8623 } else { 8624 // In this case, the expression could be printed using a different 8625 // specifier, but we've decided that the specifier is probably correct 8626 // and we should cast instead. Just use the normal warning message. 8627 EmitFormatDiagnostic( 8628 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8629 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8630 << E->getSourceRange(), 8631 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8632 } 8633 } 8634 } else { 8635 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8636 SpecifierLen); 8637 // Since the warning for passing non-POD types to variadic functions 8638 // was deferred until now, we emit a warning for non-POD 8639 // arguments here. 8640 switch (S.isValidVarArgType(ExprTy)) { 8641 case Sema::VAK_Valid: 8642 case Sema::VAK_ValidInCXX11: { 8643 unsigned Diag; 8644 switch (Match) { 8645 case ArgType::Match: llvm_unreachable("expected non-matching"); 8646 case ArgType::NoMatchPedantic: 8647 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8648 break; 8649 case ArgType::NoMatchTypeConfusion: 8650 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8651 break; 8652 case ArgType::NoMatch: 8653 Diag = diag::warn_format_conversion_argument_type_mismatch; 8654 break; 8655 } 8656 8657 EmitFormatDiagnostic( 8658 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8659 << IsEnum << CSR << E->getSourceRange(), 8660 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8661 break; 8662 } 8663 case Sema::VAK_Undefined: 8664 case Sema::VAK_MSVCUndefined: 8665 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8666 << S.getLangOpts().CPlusPlus11 << ExprTy 8667 << CallType 8668 << AT.getRepresentativeTypeName(S.Context) << CSR 8669 << E->getSourceRange(), 8670 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8671 checkForCStrMembers(AT, E); 8672 break; 8673 8674 case Sema::VAK_Invalid: 8675 if (ExprTy->isObjCObjectType()) 8676 EmitFormatDiagnostic( 8677 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8678 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8679 << AT.getRepresentativeTypeName(S.Context) << CSR 8680 << E->getSourceRange(), 8681 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8682 else 8683 // FIXME: If this is an initializer list, suggest removing the braces 8684 // or inserting a cast to the target type. 
8685 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 8686 << isa<InitListExpr>(E) << ExprTy << CallType 8687 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 8688 break; 8689 } 8690 8691 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 8692 "format string specifier index out of range"); 8693 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 8694 } 8695 8696 return true; 8697 } 8698 8699 //===--- CHECK: Scanf format string checking ------------------------------===// 8700 8701 namespace { 8702 8703 class CheckScanfHandler : public CheckFormatHandler { 8704 public: 8705 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 8706 const Expr *origFormatExpr, Sema::FormatStringType type, 8707 unsigned firstDataArg, unsigned numDataArgs, 8708 const char *beg, bool hasVAListArg, 8709 ArrayRef<const Expr *> Args, unsigned formatIdx, 8710 bool inFunctionCall, Sema::VariadicCallType CallType, 8711 llvm::SmallBitVector &CheckedVarArgs, 8712 UncoveredArgHandler &UncoveredArg) 8713 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8714 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8715 inFunctionCall, CallType, CheckedVarArgs, 8716 UncoveredArg) {} 8717 8718 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 8719 const char *startSpecifier, 8720 unsigned specifierLen) override; 8721 8722 bool HandleInvalidScanfConversionSpecifier( 8723 const analyze_scanf::ScanfSpecifier &FS, 8724 const char *startSpecifier, 8725 unsigned specifierLen) override; 8726 8727 void HandleIncompleteScanList(const char *start, const char *end) override; 8728 }; 8729 8730 } // namespace 8731 8732 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 8733 const char *end) { 8734 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 8735 getLocationOfByte(end), /*IsStringLocation*/true, 8736 getSpecifierRange(start, end - start)); 8737 } 8738 8739 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 8740 const analyze_scanf::ScanfSpecifier &FS, 8741 const char *startSpecifier, 8742 unsigned specifierLen) { 8743 const analyze_scanf::ScanfConversionSpecifier &CS = 8744 FS.getConversionSpecifier(); 8745 8746 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8747 getLocationOfByte(CS.getStart()), 8748 startSpecifier, specifierLen, 8749 CS.getStart(), CS.getLength()); 8750 } 8751 8752 bool CheckScanfHandler::HandleScanfSpecifier( 8753 const analyze_scanf::ScanfSpecifier &FS, 8754 const char *startSpecifier, 8755 unsigned specifierLen) { 8756 using namespace analyze_scanf; 8757 using namespace analyze_format_string; 8758 8759 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 8760 8761 // Handle case where '%' and '*' don't consume an argument. These shouldn't 8762 // be used to decide if we are using positional arguments consistently. 8763 if (FS.consumesDataArgument()) { 8764 if (atFirstArg) { 8765 atFirstArg = false; 8766 usesPositionalArgs = FS.usesPositionalArg(); 8767 } 8768 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8769 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8770 startSpecifier, specifierLen); 8771 return false; 8772 } 8773 } 8774 8775 // Check if the field with is non-zero. 
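  // For illustration: scanf("%0d", &x) specifies a zero field width, which is
  // meaningless for scanf and is diagnosed below with a removal fix-it.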
8776 const OptionalAmount &Amt = FS.getFieldWidth(); 8777 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 8778 if (Amt.getConstantAmount() == 0) { 8779 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 8780 Amt.getConstantLength()); 8781 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 8782 getLocationOfByte(Amt.getStart()), 8783 /*IsStringLocation*/true, R, 8784 FixItHint::CreateRemoval(R)); 8785 } 8786 } 8787 8788 if (!FS.consumesDataArgument()) { 8789 // FIXME: Technically specifying a precision or field width here 8790 // makes no sense. Worth issuing a warning at some point. 8791 return true; 8792 } 8793 8794 // Consume the argument. 8795 unsigned argIndex = FS.getArgIndex(); 8796 if (argIndex < NumDataArgs) { 8797 // The check to see if the argIndex is valid will come later. 8798 // We set the bit here because we may exit early from this 8799 // function if we encounter some other error. 8800 CoveredArgs.set(argIndex); 8801 } 8802 8803 // Check the length modifier is valid with the given conversion specifier. 8804 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8805 S.getLangOpts())) 8806 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8807 diag::warn_format_nonsensical_length); 8808 else if (!FS.hasStandardLengthModifier()) 8809 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8810 else if (!FS.hasStandardLengthConversionCombination()) 8811 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8812 diag::warn_format_non_standard_conversion_spec); 8813 8814 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8815 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8816 8817 // The remaining checks depend on the data arguments. 8818 if (HasVAListArg) 8819 return true; 8820 8821 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8822 return false; 8823 8824 // Check that the argument type matches the format specifier. 8825 const Expr *Ex = getDataArg(argIndex); 8826 if (!Ex) 8827 return true; 8828 8829 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8830 8831 if (!AT.isValid()) { 8832 return true; 8833 } 8834 8835 analyze_format_string::ArgType::MatchKind Match = 8836 AT.matchesType(S.Context, Ex->getType()); 8837 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8838 if (Match == analyze_format_string::ArgType::Match) 8839 return true; 8840 8841 ScanfSpecifier fixedFS = FS; 8842 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8843 S.getLangOpts(), S.Context); 8844 8845 unsigned Diag = 8846 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8847 : diag::warn_format_conversion_argument_type_mismatch; 8848 8849 if (Success) { 8850 // Get the fix string from the fixed format specifier. 
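// For example, for 'long n; scanf("%d", &n);' the fixed specifier renders
// as "%ld" and is attached below as a replacement fix-it.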
8851 SmallString<128> buf; 8852 llvm::raw_svector_ostream os(buf); 8853 fixedFS.toString(os); 8854 8855 EmitFormatDiagnostic( 8856 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8857 << Ex->getType() << false << Ex->getSourceRange(), 8858 Ex->getBeginLoc(), 8859 /*IsStringLocation*/ false, 8860 getSpecifierRange(startSpecifier, specifierLen), 8861 FixItHint::CreateReplacement( 8862 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8863 } else { 8864 EmitFormatDiagnostic(S.PDiag(Diag) 8865 << AT.getRepresentativeTypeName(S.Context) 8866 << Ex->getType() << false << Ex->getSourceRange(), 8867 Ex->getBeginLoc(), 8868 /*IsStringLocation*/ false, 8869 getSpecifierRange(startSpecifier, specifierLen)); 8870 } 8871 8872 return true; 8873 } 8874 8875 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8876 const Expr *OrigFormatExpr, 8877 ArrayRef<const Expr *> Args, 8878 bool HasVAListArg, unsigned format_idx, 8879 unsigned firstDataArg, 8880 Sema::FormatStringType Type, 8881 bool inFunctionCall, 8882 Sema::VariadicCallType CallType, 8883 llvm::SmallBitVector &CheckedVarArgs, 8884 UncoveredArgHandler &UncoveredArg, 8885 bool IgnoreStringsWithoutSpecifiers) { 8886 // CHECK: is the format string a wide literal? 8887 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8888 CheckFormatHandler::EmitFormatDiagnostic( 8889 S, inFunctionCall, Args[format_idx], 8890 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8891 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8892 return; 8893 } 8894 8895 // Str - The format string. NOTE: this is NOT null-terminated! 8896 StringRef StrRef = FExpr->getString(); 8897 const char *Str = StrRef.data(); 8898 // Account for cases where the string literal is truncated in a declaration. 8899 const ConstantArrayType *T = 8900 S.Context.getAsConstantArrayType(FExpr->getType()); 8901 assert(T && "String literal not of constant array type!"); 8902 size_t TypeSize = T->getSize().getZExtValue(); 8903 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8904 const unsigned numDataArgs = Args.size() - firstDataArg; 8905 8906 if (IgnoreStringsWithoutSpecifiers && 8907 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 8908 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 8909 return; 8910 8911 // Emit a warning if the string literal is truncated and does not contain an 8912 // embedded null character. 8913 if (TypeSize <= StrRef.size() && 8914 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8915 CheckFormatHandler::EmitFormatDiagnostic( 8916 S, inFunctionCall, Args[format_idx], 8917 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8918 FExpr->getBeginLoc(), 8919 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8920 return; 8921 } 8922 8923 // CHECK: empty format string? 
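// For example, printf("", value) formats nothing but still passes data
// arguments, which is almost certainly unintended.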
8924 if (StrLen == 0 && numDataArgs > 0) {
8925 CheckFormatHandler::EmitFormatDiagnostic(
8926 S, inFunctionCall, Args[format_idx],
8927 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
8928 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
8929 return;
8930 }
8931
8932 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
8933 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
8934 Type == Sema::FST_OSTrace) {
8935 CheckPrintfHandler H(
8936 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
8937 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
8938 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
8939 CheckedVarArgs, UncoveredArg);
8940
8941 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
8942 S.getLangOpts(),
8943 S.Context.getTargetInfo(),
8944 Type == Sema::FST_FreeBSDKPrintf))
8945 H.DoneProcessing();
8946 } else if (Type == Sema::FST_Scanf) {
8947 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
8948 numDataArgs, Str, HasVAListArg, Args, format_idx,
8949 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
8950
8951 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
8952 S.getLangOpts(),
8953 S.Context.getTargetInfo()))
8954 H.DoneProcessing();
8955 } // TODO: handle other formats
8956 }
8957
8958 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
8959 // Str - The format string. NOTE: this is NOT null-terminated!
8960 StringRef StrRef = FExpr->getString();
8961 const char *Str = StrRef.data();
8962 // Account for cases where the string literal is truncated in a declaration.
8963 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
8964 assert(T && "String literal not of constant array type!");
8965 size_t TypeSize = T->getSize().getZExtValue();
8966 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
8967 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
8968 getLangOpts(),
8969 Context.getTargetInfo());
8970 }
8971
8972 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
8973
8974 // Returns the related absolute value function that is larger, or 0 if one
8975 // does not exist.
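// For the integer builtins, for example, the chain is
// __builtin_abs -> __builtin_labs -> __builtin_llabs.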
8976 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8977 switch (AbsFunction) { 8978 default: 8979 return 0; 8980 8981 case Builtin::BI__builtin_abs: 8982 return Builtin::BI__builtin_labs; 8983 case Builtin::BI__builtin_labs: 8984 return Builtin::BI__builtin_llabs; 8985 case Builtin::BI__builtin_llabs: 8986 return 0; 8987 8988 case Builtin::BI__builtin_fabsf: 8989 return Builtin::BI__builtin_fabs; 8990 case Builtin::BI__builtin_fabs: 8991 return Builtin::BI__builtin_fabsl; 8992 case Builtin::BI__builtin_fabsl: 8993 return 0; 8994 8995 case Builtin::BI__builtin_cabsf: 8996 return Builtin::BI__builtin_cabs; 8997 case Builtin::BI__builtin_cabs: 8998 return Builtin::BI__builtin_cabsl; 8999 case Builtin::BI__builtin_cabsl: 9000 return 0; 9001 9002 case Builtin::BIabs: 9003 return Builtin::BIlabs; 9004 case Builtin::BIlabs: 9005 return Builtin::BIllabs; 9006 case Builtin::BIllabs: 9007 return 0; 9008 9009 case Builtin::BIfabsf: 9010 return Builtin::BIfabs; 9011 case Builtin::BIfabs: 9012 return Builtin::BIfabsl; 9013 case Builtin::BIfabsl: 9014 return 0; 9015 9016 case Builtin::BIcabsf: 9017 return Builtin::BIcabs; 9018 case Builtin::BIcabs: 9019 return Builtin::BIcabsl; 9020 case Builtin::BIcabsl: 9021 return 0; 9022 } 9023 } 9024 9025 // Returns the argument type of the absolute value function. 9026 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9027 unsigned AbsType) { 9028 if (AbsType == 0) 9029 return QualType(); 9030 9031 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9032 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9033 if (Error != ASTContext::GE_None) 9034 return QualType(); 9035 9036 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9037 if (!FT) 9038 return QualType(); 9039 9040 if (FT->getNumParams() != 1) 9041 return QualType(); 9042 9043 return FT->getParamType(0); 9044 } 9045 9046 // Returns the best absolute value function, or zero, based on type and 9047 // current absolute value function. 9048 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9049 unsigned AbsFunctionKind) { 9050 unsigned BestKind = 0; 9051 uint64_t ArgSize = Context.getTypeSize(ArgType); 9052 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9053 Kind = getLargerAbsoluteValueFunction(Kind)) { 9054 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9055 if (Context.getTypeSize(ParamType) >= ArgSize) { 9056 if (BestKind == 0) 9057 BestKind = Kind; 9058 else if (Context.hasSameType(ParamType, ArgType)) { 9059 BestKind = Kind; 9060 break; 9061 } 9062 } 9063 } 9064 return BestKind; 9065 } 9066 9067 enum AbsoluteValueKind { 9068 AVK_Integer, 9069 AVK_Floating, 9070 AVK_Complex 9071 }; 9072 9073 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9074 if (T->isIntegralOrEnumerationType()) 9075 return AVK_Integer; 9076 if (T->isRealFloatingType()) 9077 return AVK_Floating; 9078 if (T->isAnyComplexType()) 9079 return AVK_Complex; 9080 9081 llvm_unreachable("Type not integer, floating, or complex"); 9082 } 9083 9084 // Changes the absolute value function to a different type. Preserves whether 9085 // the function is a builtin. 
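// For example, fabsf applied to an integer argument maps to abs, and
// __builtin_fabsf maps to __builtin_abs; getBestAbsFunction can then widen
// the result as needed.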
9086 static unsigned changeAbsFunction(unsigned AbsKind, 9087 AbsoluteValueKind ValueKind) { 9088 switch (ValueKind) { 9089 case AVK_Integer: 9090 switch (AbsKind) { 9091 default: 9092 return 0; 9093 case Builtin::BI__builtin_fabsf: 9094 case Builtin::BI__builtin_fabs: 9095 case Builtin::BI__builtin_fabsl: 9096 case Builtin::BI__builtin_cabsf: 9097 case Builtin::BI__builtin_cabs: 9098 case Builtin::BI__builtin_cabsl: 9099 return Builtin::BI__builtin_abs; 9100 case Builtin::BIfabsf: 9101 case Builtin::BIfabs: 9102 case Builtin::BIfabsl: 9103 case Builtin::BIcabsf: 9104 case Builtin::BIcabs: 9105 case Builtin::BIcabsl: 9106 return Builtin::BIabs; 9107 } 9108 case AVK_Floating: 9109 switch (AbsKind) { 9110 default: 9111 return 0; 9112 case Builtin::BI__builtin_abs: 9113 case Builtin::BI__builtin_labs: 9114 case Builtin::BI__builtin_llabs: 9115 case Builtin::BI__builtin_cabsf: 9116 case Builtin::BI__builtin_cabs: 9117 case Builtin::BI__builtin_cabsl: 9118 return Builtin::BI__builtin_fabsf; 9119 case Builtin::BIabs: 9120 case Builtin::BIlabs: 9121 case Builtin::BIllabs: 9122 case Builtin::BIcabsf: 9123 case Builtin::BIcabs: 9124 case Builtin::BIcabsl: 9125 return Builtin::BIfabsf; 9126 } 9127 case AVK_Complex: 9128 switch (AbsKind) { 9129 default: 9130 return 0; 9131 case Builtin::BI__builtin_abs: 9132 case Builtin::BI__builtin_labs: 9133 case Builtin::BI__builtin_llabs: 9134 case Builtin::BI__builtin_fabsf: 9135 case Builtin::BI__builtin_fabs: 9136 case Builtin::BI__builtin_fabsl: 9137 return Builtin::BI__builtin_cabsf; 9138 case Builtin::BIabs: 9139 case Builtin::BIlabs: 9140 case Builtin::BIllabs: 9141 case Builtin::BIfabsf: 9142 case Builtin::BIfabs: 9143 case Builtin::BIfabsl: 9144 return Builtin::BIcabsf; 9145 } 9146 } 9147 llvm_unreachable("Unable to convert function"); 9148 } 9149 9150 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9151 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9152 if (!FnInfo) 9153 return 0; 9154 9155 switch (FDecl->getBuiltinID()) { 9156 default: 9157 return 0; 9158 case Builtin::BI__builtin_abs: 9159 case Builtin::BI__builtin_fabs: 9160 case Builtin::BI__builtin_fabsf: 9161 case Builtin::BI__builtin_fabsl: 9162 case Builtin::BI__builtin_labs: 9163 case Builtin::BI__builtin_llabs: 9164 case Builtin::BI__builtin_cabs: 9165 case Builtin::BI__builtin_cabsf: 9166 case Builtin::BI__builtin_cabsl: 9167 case Builtin::BIabs: 9168 case Builtin::BIlabs: 9169 case Builtin::BIllabs: 9170 case Builtin::BIfabs: 9171 case Builtin::BIfabsf: 9172 case Builtin::BIfabsl: 9173 case Builtin::BIcabs: 9174 case Builtin::BIcabsf: 9175 case Builtin::BIcabsl: 9176 return FDecl->getBuiltinID(); 9177 } 9178 llvm_unreachable("Unknown Builtin type"); 9179 } 9180 9181 // If the replacement is valid, emit a note with replacement function. 9182 // Additionally, suggest including the proper header if not already included. 
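// For example, in C++ a call like abs(3.14) gets a note suggesting std::abs,
// plus a hint to include <cmath> if no suitable overload of std::abs is
// visible at this point.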
9183 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9184 unsigned AbsKind, QualType ArgType) { 9185 bool EmitHeaderHint = true; 9186 const char *HeaderName = nullptr; 9187 const char *FunctionName = nullptr; 9188 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9189 FunctionName = "std::abs"; 9190 if (ArgType->isIntegralOrEnumerationType()) { 9191 HeaderName = "cstdlib"; 9192 } else if (ArgType->isRealFloatingType()) { 9193 HeaderName = "cmath"; 9194 } else { 9195 llvm_unreachable("Invalid Type"); 9196 } 9197 9198 // Lookup all std::abs 9199 if (NamespaceDecl *Std = S.getStdNamespace()) { 9200 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9201 R.suppressDiagnostics(); 9202 S.LookupQualifiedName(R, Std); 9203 9204 for (const auto *I : R) { 9205 const FunctionDecl *FDecl = nullptr; 9206 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9207 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9208 } else { 9209 FDecl = dyn_cast<FunctionDecl>(I); 9210 } 9211 if (!FDecl) 9212 continue; 9213 9214 // Found std::abs(), check that they are the right ones. 9215 if (FDecl->getNumParams() != 1) 9216 continue; 9217 9218 // Check that the parameter type can handle the argument. 9219 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9220 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9221 S.Context.getTypeSize(ArgType) <= 9222 S.Context.getTypeSize(ParamType)) { 9223 // Found a function, don't need the header hint. 9224 EmitHeaderHint = false; 9225 break; 9226 } 9227 } 9228 } 9229 } else { 9230 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9231 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9232 9233 if (HeaderName) { 9234 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9235 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9236 R.suppressDiagnostics(); 9237 S.LookupName(R, S.getCurScope()); 9238 9239 if (R.isSingleResult()) { 9240 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9241 if (FD && FD->getBuiltinID() == AbsKind) { 9242 EmitHeaderHint = false; 9243 } else { 9244 return; 9245 } 9246 } else if (!R.empty()) { 9247 return; 9248 } 9249 } 9250 } 9251 9252 S.Diag(Loc, diag::note_replace_abs_function) 9253 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9254 9255 if (!HeaderName) 9256 return; 9257 9258 if (!EmitHeaderHint) 9259 return; 9260 9261 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9262 << FunctionName; 9263 } 9264 9265 template <std::size_t StrLen> 9266 static bool IsStdFunction(const FunctionDecl *FDecl, 9267 const char (&Str)[StrLen]) { 9268 if (!FDecl) 9269 return false; 9270 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9271 return false; 9272 if (!FDecl->isInStdNamespace()) 9273 return false; 9274 9275 return true; 9276 } 9277 9278 // Warn when using the wrong abs() function. 9279 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9280 const FunctionDecl *FDecl) { 9281 if (Call->getNumArgs() != 1) 9282 return; 9283 9284 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9285 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9286 if (AbsKind == 0 && !IsStdAbs) 9287 return; 9288 9289 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9290 QualType ParamType = Call->getArg(0)->getType(); 9291 9292 // Unsigned types cannot be negative. Suggest removing the absolute value 9293 // function call. 
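// For example, given 'unsigned u;', a call like abs(u) is a no-op; the
// fix-it below removes the callee, turning 'abs(u)' into '(u)'.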
9294 if (ArgType->isUnsignedIntegerType()) {
9295 const char *FunctionName =
9296 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
9297 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
9298 Diag(Call->getExprLoc(), diag::note_remove_abs)
9299 << FunctionName
9300 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
9301 return;
9302 }
9303
9304 // Taking the absolute value of a pointer is very suspicious, they probably
9305 // wanted to index into an array, dereference a pointer, call a function, etc.
9306 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
9307 unsigned DiagType = 0;
9308 if (ArgType->isFunctionType())
9309 DiagType = 1;
9310 else if (ArgType->isArrayType())
9311 DiagType = 2;
9312
9313 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
9314 return;
9315 }
9316
9317 // std::abs has overloads which prevent most of the absolute value problems
9318 // from occurring.
9319 if (IsStdAbs)
9320 return;
9321
9322 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
9323 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
9324
9325 // The argument and parameter are the same kind. Check if they are the right
9326 // size.
9327 if (ArgValueKind == ParamValueKind) {
9328 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
9329 return;
9330
9331 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
9332 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
9333 << FDecl << ArgType << ParamType;
9334
9335 if (NewAbsKind == 0)
9336 return;
9337
9338 emitReplacement(*this, Call->getExprLoc(),
9339 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
9340 return;
9341 }
9342
9343 // ArgValueKind != ParamValueKind
9344 // The wrong type of absolute value function was used. Attempt to find the
9345 // proper one.
9346 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
9347 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
9348 if (NewAbsKind == 0)
9349 return;
9350
9351 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
9352 << FDecl << ParamValueKind << ArgValueKind;
9353
9354 emitReplacement(*this, Call->getExprLoc(),
9355 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
9356 }
9357
9358 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
9359 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
9360 const FunctionDecl *FDecl) {
9361 if (!Call || !FDecl) return;
9362
9363 // Ignore template specializations and macros.
9364 if (inTemplateInstantiation()) return;
9365 if (Call->getExprLoc().isMacroID()) return;
9366
9367 // Only care about the one template argument, two function parameter std::max.
9368 if (Call->getNumArgs() != 2) return;
9369 if (!IsStdFunction(FDecl, "max")) return;
9370 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
9371 if (!ArgList) return;
9372 if (ArgList->size() != 1) return;
9373
9374 // Check that template type argument is unsigned integer.
9375 const auto& TA = ArgList->get(0);
9376 if (TA.getKind() != TemplateArgument::Type) return;
9377 QualType ArgType = TA.getAsType();
9378 if (!ArgType->isUnsignedIntegerType()) return;
9379
9380 // See if either argument is a literal zero.
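// Because std::max takes its arguments by const reference, a literal zero
// reaches us wrapped in a MaterializeTemporaryExpr, which the lambda below
// unwraps before checking the literal's value.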
9381 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9382 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9383 if (!MTE) return false; 9384 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 9385 if (!Num) return false; 9386 if (Num->getValue() != 0) return false; 9387 return true; 9388 }; 9389 9390 const Expr *FirstArg = Call->getArg(0); 9391 const Expr *SecondArg = Call->getArg(1); 9392 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9393 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9394 9395 // Only warn when exactly one argument is zero. 9396 if (IsFirstArgZero == IsSecondArgZero) return; 9397 9398 SourceRange FirstRange = FirstArg->getSourceRange(); 9399 SourceRange SecondRange = SecondArg->getSourceRange(); 9400 9401 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9402 9403 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9404 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9405 9406 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9407 SourceRange RemovalRange; 9408 if (IsFirstArgZero) { 9409 RemovalRange = SourceRange(FirstRange.getBegin(), 9410 SecondRange.getBegin().getLocWithOffset(-1)); 9411 } else { 9412 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9413 SecondRange.getEnd()); 9414 } 9415 9416 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9417 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9418 << FixItHint::CreateRemoval(RemovalRange); 9419 } 9420 9421 //===--- CHECK: Standard memory functions ---------------------------------===// 9422 9423 /// Takes the expression passed to the size_t parameter of functions 9424 /// such as memcmp, strncat, etc and warns if it's a comparison. 9425 /// 9426 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9427 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9428 IdentifierInfo *FnName, 9429 SourceLocation FnLoc, 9430 SourceLocation RParenLoc) { 9431 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9432 if (!Size) 9433 return false; 9434 9435 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9436 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9437 return false; 9438 9439 SourceRange SizeRange = Size->getSourceRange(); 9440 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9441 << SizeRange << FnName; 9442 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9443 << FnName 9444 << FixItHint::CreateInsertion( 9445 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9446 << FixItHint::CreateRemoval(RParenLoc); 9447 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9448 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9449 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9450 ")"); 9451 9452 return true; 9453 } 9454 9455 /// Determine whether the given type is or contains a dynamic class type 9456 /// (e.g., whether it has a vtable). 9457 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9458 bool &IsContained) { 9459 // Look through array types while ignoring qualifiers. 9460 const Type *Ty = T->getBaseElementTypeUnsafe(); 9461 IsContained = false; 9462 9463 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9464 RD = RD ? RD->getDefinition() : nullptr; 9465 if (!RD || RD->isInvalidDecl()) 9466 return nullptr; 9467 9468 if (RD->isDynamicClass()) 9469 return RD; 9470 9471 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9472 // It's impossible for a class to transitively contain itself by value, so 9473 // infinite recursion is impossible. 9474 for (auto *FD : RD->fields()) { 9475 bool SubContained; 9476 if (const CXXRecordDecl *ContainedRD = 9477 getContainedDynamicClass(FD->getType(), SubContained)) { 9478 IsContained = true; 9479 return ContainedRD; 9480 } 9481 } 9482 9483 return nullptr; 9484 } 9485 9486 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9487 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9488 if (Unary->getKind() == UETT_SizeOf) 9489 return Unary; 9490 return nullptr; 9491 } 9492 9493 /// If E is a sizeof expression, returns its argument expression, 9494 /// otherwise returns NULL. 9495 static const Expr *getSizeOfExprArg(const Expr *E) { 9496 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9497 if (!SizeOf->isArgumentType()) 9498 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9499 return nullptr; 9500 } 9501 9502 /// If E is a sizeof expression, returns its argument type. 9503 static QualType getSizeOfArgType(const Expr *E) { 9504 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9505 return SizeOf->getTypeOfArgument(); 9506 return QualType(); 9507 } 9508 9509 namespace { 9510 9511 struct SearchNonTrivialToInitializeField 9512 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9513 using Super = 9514 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9515 9516 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9517 9518 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9519 SourceLocation SL) { 9520 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9521 asDerived().visitArray(PDIK, AT, SL); 9522 return; 9523 } 9524 9525 Super::visitWithKind(PDIK, FT, SL); 9526 } 9527 9528 void visitARCStrong(QualType FT, SourceLocation SL) { 9529 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9530 } 9531 void visitARCWeak(QualType FT, SourceLocation SL) { 9532 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9533 } 9534 void visitStruct(QualType FT, SourceLocation SL) { 9535 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9536 visit(FD->getType(), FD->getLocation()); 9537 } 9538 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9539 const ArrayType *AT, SourceLocation SL) { 9540 visit(getContext().getBaseElementType(AT), SL); 9541 } 9542 void visitTrivial(QualType FT, SourceLocation SL) {} 9543 9544 static void diag(QualType RT, const Expr *E, Sema &S) { 9545 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9546 } 9547 9548 ASTContext &getContext() { return S.getASTContext(); } 9549 9550 const Expr *E; 9551 Sema &S; 9552 }; 9553 9554 struct SearchNonTrivialToCopyField 9555 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9556 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9557 9558 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9559 9560 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9561 SourceLocation SL) { 9562 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9563 asDerived().visitArray(PCK, AT, SL); 9564 return; 9565 } 9566 9567 Super::visitWithKind(PCK, FT, SL); 9568 } 9569 9570 void visitARCStrong(QualType FT, SourceLocation SL) { 9571 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9572 } 9573 void visitARCWeak(QualType FT, SourceLocation SL) { 9574 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9575 } 9576 void visitStruct(QualType FT, SourceLocation SL) { 9577 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9578 visit(FD->getType(), FD->getLocation()); 9579 } 9580 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9581 SourceLocation SL) { 9582 visit(getContext().getBaseElementType(AT), SL); 9583 } 9584 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9585 SourceLocation SL) {} 9586 void visitTrivial(QualType FT, SourceLocation SL) {} 9587 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9588 9589 static void diag(QualType RT, const Expr *E, Sema &S) { 9590 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9591 } 9592 9593 ASTContext &getContext() { return S.getASTContext(); } 9594 9595 const Expr *E; 9596 Sema &S; 9597 }; 9598 9599 } 9600 9601 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9602 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9603 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9604 9605 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9606 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9607 return false; 9608 9609 return doesExprLikelyComputeSize(BO->getLHS()) || 9610 doesExprLikelyComputeSize(BO->getRHS()); 9611 } 9612 9613 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9614 } 9615 9616 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9617 /// 9618 /// \code 9619 /// #define MACRO 0 9620 /// foo(MACRO); 9621 /// foo(0); 9622 /// \endcode 9623 /// 9624 /// This should return true for the first call to foo, but not for the second 9625 /// (regardless of whether foo is a macro or function). 9626 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9627 SourceLocation CallLoc, 9628 SourceLocation ArgLoc) { 9629 if (!CallLoc.isMacroID()) 9630 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9631 9632 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9633 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9634 } 9635 9636 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9637 /// last two arguments transposed. 9638 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9639 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9640 return; 9641 9642 const Expr *SizeArg = 9643 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9644 9645 auto isLiteralZero = [](const Expr *E) { 9646 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9647 }; 9648 9649 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9650 SourceLocation CallLoc = Call->getRParenLoc(); 9651 SourceManager &SM = S.getSourceManager(); 9652 if (isLiteralZero(SizeArg) && 9653 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9654 9655 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9656 9657 // Some platforms #define bzero to __builtin_memset. See if this is the 9658 // case, and if so, emit a better diagnostic. 
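// On such platforms a user-written bzero(buf, n) reaches us as a memset
// builtin whose immediate macro name still spells "bzero".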
9659 if (BId == Builtin::BIbzero || 9660 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9661 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9662 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9663 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9664 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9665 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9666 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9667 } 9668 return; 9669 } 9670 9671 // If the second argument to a memset is a sizeof expression and the third 9672 // isn't, this is also likely an error. This should catch 9673 // 'memset(buf, sizeof(buf), 0xff)'. 9674 if (BId == Builtin::BImemset && 9675 doesExprLikelyComputeSize(Call->getArg(1)) && 9676 !doesExprLikelyComputeSize(Call->getArg(2))) { 9677 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9678 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9679 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9680 return; 9681 } 9682 } 9683 9684 /// Check for dangerous or invalid arguments to memset(). 9685 /// 9686 /// This issues warnings on known problematic, dangerous or unspecified 9687 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9688 /// function calls. 9689 /// 9690 /// \param Call The call expression to diagnose. 9691 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9692 unsigned BId, 9693 IdentifierInfo *FnName) { 9694 assert(BId != 0); 9695 9696 // It is possible to have a non-standard definition of memset. Validate 9697 // we have enough arguments, and if not, abort further checking. 9698 unsigned ExpectedNumArgs = 9699 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9700 if (Call->getNumArgs() < ExpectedNumArgs) 9701 return; 9702 9703 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9704 BId == Builtin::BIstrndup ? 1 : 2); 9705 unsigned LenArg = 9706 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9707 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9708 9709 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9710 Call->getBeginLoc(), Call->getRParenLoc())) 9711 return; 9712 9713 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9714 CheckMemaccessSize(*this, BId, Call); 9715 9716 // We have special checking when the length is a sizeof expression. 9717 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9718 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9719 llvm::FoldingSetNodeID SizeOfArgID; 9720 9721 // Although widely used, 'bzero' is not a standard function. Be more strict 9722 // with the argument types before allowing diagnostics and only allow the 9723 // form bzero(ptr, sizeof(...)). 9724 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9725 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9726 return; 9727 9728 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9729 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9730 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9731 9732 QualType DestTy = Dest->getType(); 9733 QualType PointeeTy; 9734 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9735 PointeeTy = DestPtrTy->getPointeeType(); 9736 9737 // Never warn about void type pointers. This can be used to suppress 9738 // false positives. 
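// For example, memset((void *)&obj, 0, sizeof(obj)) deliberately opts out
// of these diagnostics; the note emitted below suggests exactly this cast.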
9739 if (PointeeTy->isVoidType())
9740 continue;
9741
9742 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
9743 // actually comparing the expressions for equality. Because computing the
9744 // expression IDs can be expensive, we only do this if the diagnostic is
9745 // enabled.
9746 if (SizeOfArg &&
9747 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
9748 SizeOfArg->getExprLoc())) {
9749 // We only compute IDs for expressions if the warning is enabled, and
9750 // cache the sizeof arg's ID.
9751 if (SizeOfArgID == llvm::FoldingSetNodeID())
9752 SizeOfArg->Profile(SizeOfArgID, Context, true);
9753 llvm::FoldingSetNodeID DestID;
9754 Dest->Profile(DestID, Context, true);
9755 if (DestID == SizeOfArgID) {
9756 // TODO: For strncpy() and friends, this could suggest sizeof(dst)
9757 // over sizeof(src) as well.
9758 unsigned ActionIdx = 0; // Default is to suggest dereferencing.
9759 StringRef ReadableName = FnName->getName();
9760
9761 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
9762 if (UnaryOp->getOpcode() == UO_AddrOf)
9763 ActionIdx = 1; // If it's an address-of operator, just remove it.
9764 if (!PointeeTy->isIncompleteType() &&
9765 (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
9766 ActionIdx = 2; // If the pointee's size is sizeof(char),
9767 // suggest an explicit length.
9768
9769 // If the function is defined as a builtin macro, do not show macro
9770 // expansion.
9771 SourceLocation SL = SizeOfArg->getExprLoc();
9772 SourceRange DSR = Dest->getSourceRange();
9773 SourceRange SSR = SizeOfArg->getSourceRange();
9774 SourceManager &SM = getSourceManager();
9775
9776 if (SM.isMacroArgExpansion(SL)) {
9777 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
9778 SL = SM.getSpellingLoc(SL);
9779 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
9780 SM.getSpellingLoc(DSR.getEnd()));
9781 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
9782 SM.getSpellingLoc(SSR.getEnd()));
9783 }
9784
9785 DiagRuntimeBehavior(SL, SizeOfArg,
9786 PDiag(diag::warn_sizeof_pointer_expr_memaccess)
9787 << ReadableName
9788 << PointeeTy
9789 << DestTy
9790 << DSR
9791 << SSR);
9792 DiagRuntimeBehavior(SL, SizeOfArg,
9793 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
9794 << ActionIdx
9795 << SSR);
9796
9797 break;
9798 }
9799 }
9800
9801 // Also check for cases where the sizeof argument is the exact same
9802 // type as the memory argument, and where it points to a user-defined
9803 // record type.
9804 if (SizeOfArgTy != QualType()) {
9805 if (PointeeTy->isRecordType() &&
9806 Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
9807 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
9808 PDiag(diag::warn_sizeof_pointer_type_memaccess)
9809 << FnName << SizeOfArgTy << ArgIdx
9810 << PointeeTy << Dest->getSourceRange()
9811 << LenExpr->getSourceRange());
9812 break;
9813 }
9814 }
9815 } else if (DestTy->isArrayType()) {
9816 PointeeTy = DestTy;
9817 }
9818
9819 if (PointeeTy == QualType())
9820 continue;
9821
9822 // Always complain about dynamic classes.
9823 bool IsContained;
9824 if (const CXXRecordDecl *ContainedRD =
9825 getContainedDynamicClass(PointeeTy, IsContained)) {
9826
9827 unsigned OperationType = 0;
9828 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
9829 // "overwritten" if we're warning about the destination for any call
9830 // but memcmp; otherwise a verb appropriate to the call.
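// OperationType 1, 2 and 3 below select the wording for memcpy, memmove and
// the comparison functions (memcmp/bcmp) respectively; 0 is the default.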
9831 if (ArgIdx != 0 || IsCmp) { 9832 if (BId == Builtin::BImemcpy) 9833 OperationType = 1; 9834 else if(BId == Builtin::BImemmove) 9835 OperationType = 2; 9836 else if (IsCmp) 9837 OperationType = 3; 9838 } 9839 9840 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9841 PDiag(diag::warn_dyn_class_memaccess) 9842 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9843 << IsContained << ContainedRD << OperationType 9844 << Call->getCallee()->getSourceRange()); 9845 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9846 BId != Builtin::BImemset) 9847 DiagRuntimeBehavior( 9848 Dest->getExprLoc(), Dest, 9849 PDiag(diag::warn_arc_object_memaccess) 9850 << ArgIdx << FnName << PointeeTy 9851 << Call->getCallee()->getSourceRange()); 9852 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9853 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9854 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9855 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9856 PDiag(diag::warn_cstruct_memaccess) 9857 << ArgIdx << FnName << PointeeTy << 0); 9858 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9859 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9860 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9861 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9862 PDiag(diag::warn_cstruct_memaccess) 9863 << ArgIdx << FnName << PointeeTy << 1); 9864 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9865 } else { 9866 continue; 9867 } 9868 } else 9869 continue; 9870 9871 DiagRuntimeBehavior( 9872 Dest->getExprLoc(), Dest, 9873 PDiag(diag::note_bad_memaccess_silence) 9874 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9875 break; 9876 } 9877 } 9878 9879 // A little helper routine: ignore addition and subtraction of integer literals. 9880 // This intentionally does not ignore all integer constant expressions because 9881 // we don't want to remove sizeof(). 9882 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9883 Ex = Ex->IgnoreParenCasts(); 9884 9885 while (true) { 9886 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9887 if (!BO || !BO->isAdditiveOp()) 9888 break; 9889 9890 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9891 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9892 9893 if (isa<IntegerLiteral>(RHS)) 9894 Ex = LHS; 9895 else if (isa<IntegerLiteral>(LHS)) 9896 Ex = RHS; 9897 else 9898 break; 9899 } 9900 9901 return Ex; 9902 } 9903 9904 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9905 ASTContext &Context) { 9906 // Only handle constant-sized or VLAs, but not flexible members. 9907 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9908 // Only issue the FIXIT for arrays of size > 1. 9909 if (CAT->getSize().getSExtValue() <= 1) 9910 return false; 9911 } else if (!Ty->isVariableArrayType()) { 9912 return false; 9913 } 9914 return true; 9915 } 9916 9917 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9918 // be the size of the source, instead of the destination. 
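// For example, strlcpy(dst, src, sizeof(src)) and
// strlcpy(dst, src, strlen(src)) are both flagged; the usual intent is
// sizeof(dst).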
9919 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9920 IdentifierInfo *FnName) { 9921 9922 // Don't crash if the user has the wrong number of arguments 9923 unsigned NumArgs = Call->getNumArgs(); 9924 if ((NumArgs != 3) && (NumArgs != 4)) 9925 return; 9926 9927 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9928 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9929 const Expr *CompareWithSrc = nullptr; 9930 9931 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9932 Call->getBeginLoc(), Call->getRParenLoc())) 9933 return; 9934 9935 // Look for 'strlcpy(dst, x, sizeof(x))' 9936 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9937 CompareWithSrc = Ex; 9938 else { 9939 // Look for 'strlcpy(dst, x, strlen(x))' 9940 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9941 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9942 SizeCall->getNumArgs() == 1) 9943 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9944 } 9945 } 9946 9947 if (!CompareWithSrc) 9948 return; 9949 9950 // Determine if the argument to sizeof/strlen is equal to the source 9951 // argument. In principle there's all kinds of things you could do 9952 // here, for instance creating an == expression and evaluating it with 9953 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9954 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9955 if (!SrcArgDRE) 9956 return; 9957 9958 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9959 if (!CompareWithSrcDRE || 9960 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9961 return; 9962 9963 const Expr *OriginalSizeArg = Call->getArg(2); 9964 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9965 << OriginalSizeArg->getSourceRange() << FnName; 9966 9967 // Output a FIXIT hint if the destination is an array (rather than a 9968 // pointer to an array). This could be enhanced to handle some 9969 // pointers if we know the actual size, like if DstArg is 'array+2' 9970 // we could say 'sizeof(array)-2'. 9971 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9972 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9973 return; 9974 9975 SmallString<128> sizeString; 9976 llvm::raw_svector_ostream OS(sizeString); 9977 OS << "sizeof("; 9978 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9979 OS << ")"; 9980 9981 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9982 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9983 OS.str()); 9984 } 9985 9986 /// Check if two expressions refer to the same declaration. 9987 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 9988 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 9989 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 9990 return D1->getDecl() == D2->getDecl(); 9991 return false; 9992 } 9993 9994 static const Expr *getStrlenExprArg(const Expr *E) { 9995 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 9996 const FunctionDecl *FD = CE->getDirectCallee(); 9997 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 9998 return nullptr; 9999 return CE->getArg(0)->IgnoreParenCasts(); 10000 } 10001 return nullptr; 10002 } 10003 10004 // Warn on anti-patterns as the 'size' argument to strncat. 
10005 // The correct size argument should look like the following:
10006 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
10007 void Sema::CheckStrncatArguments(const CallExpr *CE,
10008 IdentifierInfo *FnName) {
10009 // Don't crash if the user has the wrong number of arguments.
10010 if (CE->getNumArgs() < 3)
10011 return;
10012 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
10013 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
10014 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
10015
10016 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
10017 CE->getRParenLoc()))
10018 return;
10019
10020 // Identify common expressions that are wrongly used as the size argument
10021 // to strncat and may lead to buffer overflows.
10022 unsigned PatternType = 0;
10023 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
10024 // - sizeof(dst)
10025 if (referToTheSameDecl(SizeOfArg, DstArg))
10026 PatternType = 1;
10027 // - sizeof(src)
10028 else if (referToTheSameDecl(SizeOfArg, SrcArg))
10029 PatternType = 2;
10030 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
10031 if (BE->getOpcode() == BO_Sub) {
10032 const Expr *L = BE->getLHS()->IgnoreParenCasts();
10033 const Expr *R = BE->getRHS()->IgnoreParenCasts();
10034 // - sizeof(dst) - strlen(dst)
10035 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
10036 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
10037 PatternType = 1;
10038 // - sizeof(src) - (anything)
10039 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
10040 PatternType = 2;
10041 }
10042 }
10043
10044 if (PatternType == 0)
10045 return;
10046
10047 // Generate the diagnostic.
10048 SourceLocation SL = LenArg->getBeginLoc();
10049 SourceRange SR = LenArg->getSourceRange();
10050 SourceManager &SM = getSourceManager();
10051
10052 // If the function is defined as a builtin macro, do not show macro expansion.
10053 if (SM.isMacroArgExpansion(SL)) {
10054 SL = SM.getSpellingLoc(SL);
10055 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
10056 SM.getSpellingLoc(SR.getEnd()));
10057 }
10058
10059 // Check if the destination is an array (rather than a pointer to an array).
10060 QualType DstTy = DstArg->getType();
10061 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
10062 Context);
10063 if (!isKnownSizeArray) {
10064 if (PatternType == 1)
10065 Diag(SL, diag::warn_strncat_wrong_size) << SR;
10066 else
10067 Diag(SL, diag::warn_strncat_src_size) << SR;
10068 return;
10069 }
10070
10071 if (PatternType == 1)
10072 Diag(SL, diag::warn_strncat_large_size) << SR;
10073 else
10074 Diag(SL, diag::warn_strncat_src_size) << SR;
10075
10076 SmallString<128> sizeString;
10077 llvm::raw_svector_ostream OS(sizeString);
10078 OS << "sizeof(";
10079 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
10080 OS << ") - ";
10081 OS << "strlen(";
10082 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
10083 OS << ") - 1";
10084
10085 Diag(SL, diag::note_strncat_wrong_size)
10086 << FixItHint::CreateReplacement(SR, OS.str());
10087 }
10088
10089 void
10090 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
10091 SourceLocation ReturnLoc,
10092 bool isObjCMethod,
10093 const AttrVec *Attrs,
10094 const FunctionDecl *FD) {
10095 // Check if the return value is null but should not be.
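// For instance, 'return nullptr;' from a function declared with
// __attribute__((returns_nonnull)) triggers the warning below.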
10096 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
10097 (!isObjCMethod && isNonNullType(Context, lhsType))) &&
10098 CheckNonNullExpr(*this, RetValExp))
10099 Diag(ReturnLoc, diag::warn_null_ret)
10100 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
10101
10102 // C++11 [basic.stc.dynamic.allocation]p4:
10103 // If an allocation function declared with a non-throwing
10104 // exception-specification fails to allocate storage, it shall return
10105 // a null pointer. Any other allocation function that fails to allocate
10106 // storage shall indicate failure only by throwing an exception [...]
10107 if (FD) {
10108 OverloadedOperatorKind Op = FD->getOverloadedOperator();
10109 if (Op == OO_New || Op == OO_Array_New) {
10110 const FunctionProtoType *Proto
10111 = FD->getType()->castAs<FunctionProtoType>();
10112 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
10113 CheckNonNullExpr(*this, RetValExp))
10114 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
10115 << FD << getLangOpts().CPlusPlus11;
10116 }
10117 }
10118 }
10119
10120 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
10121
10122 /// Check for comparisons of floating point operands using != and ==.
10123 /// Issue a warning if these are not self-comparisons, as they are not likely
10124 /// to do what the programmer intended.
10125 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
10126 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
10127 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
10128
10129 // Special case: check for x == x (which is OK).
10130 // Do not emit warnings for such cases.
10131 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
10132 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
10133 if (DRL->getDecl() == DRR->getDecl())
10134 return;
10135
10136 // Special case: check for comparisons against literals that can be exactly
10137 // represented by APFloat. In such cases, do not emit a warning. This
10138 // is a heuristic: often comparisons against such literals are used to
10139 // detect if a value in a variable has not changed. This clearly can
10140 // lead to false negatives.
10141 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
10142 if (FLL->isExact())
10143 return;
10144 } else
10145 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
10146 if (FLR->isExact())
10147 return;
10148
10149 // Don't warn if either operand is a call to a builtin function.
10150 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
10151 if (CL->getBuiltinCallee())
10152 return;
10153
10154 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
10155 if (CR->getBuiltinCallee())
10156 return;
10157
10158 // Emit the diagnostic.
10159 Diag(Loc, diag::warn_floatingpoint_eq)
10160 << LHS->getSourceRange() << RHS->getSourceRange();
10161 }
10162
10163 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
10164 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
10165
10166 namespace {
10167
10168 /// Structure recording the 'active' range of an integer-valued
10169 /// expression.
10170 struct IntRange {
10171 /// The number of bits active in the int. Note that this includes exactly one
10172 /// sign bit if !NonNegative.
10173 unsigned Width;
10174
10175 /// True if the int is known not to have negative values.
If so, all leading 10176 /// bits before Width are known zero, otherwise they are known to be the 10177 /// same as the MSB within Width. 10178 bool NonNegative; 10179 10180 IntRange(unsigned Width, bool NonNegative) 10181 : Width(Width), NonNegative(NonNegative) {} 10182 10183 /// Number of bits excluding the sign bit. 10184 unsigned valueBits() const { 10185 return NonNegative ? Width : Width - 1; 10186 } 10187 10188 /// Returns the range of the bool type. 10189 static IntRange forBoolType() { 10190 return IntRange(1, true); 10191 } 10192 10193 /// Returns the range of an opaque value of the given integral type. 10194 static IntRange forValueOfType(ASTContext &C, QualType T) { 10195 return forValueOfCanonicalType(C, 10196 T->getCanonicalTypeInternal().getTypePtr()); 10197 } 10198 10199 /// Returns the range of an opaque value of a canonical integral type. 10200 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10201 assert(T->isCanonicalUnqualified()); 10202 10203 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10204 T = VT->getElementType().getTypePtr(); 10205 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10206 T = CT->getElementType().getTypePtr(); 10207 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10208 T = AT->getValueType().getTypePtr(); 10209 10210 if (!C.getLangOpts().CPlusPlus) { 10211 // For enum types in C code, use the underlying datatype. 10212 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10213 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 10214 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 10215 // For enum types in C++, use the known bit width of the enumerators. 10216 EnumDecl *Enum = ET->getDecl(); 10217 // In C++11, enums can have a fixed underlying type. Use this type to 10218 // compute the range. 10219 if (Enum->isFixed()) { 10220 return IntRange(C.getIntWidth(QualType(T, 0)), 10221 !ET->isSignedIntegerOrEnumerationType()); 10222 } 10223 10224 unsigned NumPositive = Enum->getNumPositiveBits(); 10225 unsigned NumNegative = Enum->getNumNegativeBits(); 10226 10227 if (NumNegative == 0) 10228 return IntRange(NumPositive, true/*NonNegative*/); 10229 else 10230 return IntRange(std::max(NumPositive + 1, NumNegative), 10231 false/*NonNegative*/); 10232 } 10233 10234 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10235 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10236 10237 const BuiltinType *BT = cast<BuiltinType>(T); 10238 assert(BT->isInteger()); 10239 10240 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10241 } 10242 10243 /// Returns the "target" range of a canonical integral type, i.e. 10244 /// the range of values expressible in the type. 10245 /// 10246 /// This matches forValueOfCanonicalType except that enums have the 10247 /// full range of their type, not the range of their enumerators. 
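/// For example, for an unfixed 'enum E { A, B, C };' in C++,
/// forValueOfCanonicalType yields a 2-bit non-negative range, whereas this
/// function uses the full width of the enum's underlying integer type.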
10248 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 10249 assert(T->isCanonicalUnqualified()); 10250 10251 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10252 T = VT->getElementType().getTypePtr(); 10253 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10254 T = CT->getElementType().getTypePtr(); 10255 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10256 T = AT->getValueType().getTypePtr(); 10257 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10258 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 10259 10260 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10261 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10262 10263 const BuiltinType *BT = cast<BuiltinType>(T); 10264 assert(BT->isInteger()); 10265 10266 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10267 } 10268 10269 /// Returns the supremum of two ranges: i.e. their conservative merge. 10270 static IntRange join(IntRange L, IntRange R) { 10271 bool Unsigned = L.NonNegative && R.NonNegative; 10272 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 10273 L.NonNegative && R.NonNegative); 10274 } 10275 10276 /// Return the range of a bitwise-AND of the two ranges. 10277 static IntRange bit_and(IntRange L, IntRange R) { 10278 unsigned Bits = std::max(L.Width, R.Width); 10279 bool NonNegative = false; 10280 if (L.NonNegative) { 10281 Bits = std::min(Bits, L.Width); 10282 NonNegative = true; 10283 } 10284 if (R.NonNegative) { 10285 Bits = std::min(Bits, R.Width); 10286 NonNegative = true; 10287 } 10288 return IntRange(Bits, NonNegative); 10289 } 10290 10291 /// Return the range of a sum of the two ranges. 10292 static IntRange sum(IntRange L, IntRange R) { 10293 bool Unsigned = L.NonNegative && R.NonNegative; 10294 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 10295 Unsigned); 10296 } 10297 10298 /// Return the range of a difference of the two ranges. 10299 static IntRange difference(IntRange L, IntRange R) { 10300 // We need a 1-bit-wider range if: 10301 // 1) LHS can be negative: least value can be reduced. 10302 // 2) RHS can be negative: greatest value can be increased. 10303 bool CanWiden = !L.NonNegative || !R.NonNegative; 10304 bool Unsigned = L.NonNegative && R.Width == 0; 10305 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 10306 !Unsigned, 10307 Unsigned); 10308 } 10309 10310 /// Return the range of a product of the two ranges. 10311 static IntRange product(IntRange L, IntRange R) { 10312 // If both LHS and RHS can be negative, we can form 10313 // -2^L * -2^R = 2^(L + R) 10314 // which requires L + R + 1 value bits to represent. 10315 bool CanWiden = !L.NonNegative && !R.NonNegative; 10316 bool Unsigned = L.NonNegative && R.NonNegative; 10317 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 10318 Unsigned); 10319 } 10320 10321 /// Return the range of a remainder operation between the two ranges. 10322 static IntRange rem(IntRange L, IntRange R) { 10323 // The result of a remainder can't be larger than the result of 10324 // either side. The sign of the result is the sign of the LHS. 
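// For example, 'int % unsigned char' needs at most the narrower operand's
// value bits, plus a sign bit because the int LHS may be negative.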
10325 bool Unsigned = L.NonNegative;
10326 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
10327 Unsigned);
10328 }
10329 };
10330
10331 } // namespace
10332
10333 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
10334 unsigned MaxWidth) {
10335 if (value.isSigned() && value.isNegative())
10336 return IntRange(value.getMinSignedBits(), false);
10337
10338 if (value.getBitWidth() > MaxWidth)
10339 value = value.trunc(MaxWidth);
10340
10341 // isNonNegative() just checks the sign bit without considering
10342 // signedness.
10343 return IntRange(value.getActiveBits(), true);
10344 }
10345
10346 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
10347 unsigned MaxWidth) {
10348 if (result.isInt())
10349 return GetValueRange(C, result.getInt(), MaxWidth);
10350
10351 if (result.isVector()) {
10352 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
10353 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
10354 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
10355 R = IntRange::join(R, El);
10356 }
10357 return R;
10358 }
10359
10360 if (result.isComplexInt()) {
10361 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
10362 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
10363 return IntRange::join(R, I);
10364 }
10365
10366 // This can happen with lossless casts to intptr_t of "based" lvalues.
10367 // Assume it might use arbitrary bits.
10368 // FIXME: The only reason we need to pass the type in here is to get
10369 // the sign right on this one case. It would be nice if APValue
10370 // preserved this.
10371 assert(result.isLValue() || result.isAddrLabelDiff());
10372 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
10373 }
10374
10375 static QualType GetExprType(const Expr *E) {
10376 QualType Ty = E->getType();
10377 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
10378 Ty = AtomicRHS->getValueType();
10379 return Ty;
10380 }
10381
10382 /// Pseudo-evaluate the given integer expression, estimating the
10383 /// range of values it might take.
10384 ///
10385 /// \param MaxWidth The width to which the value will be truncated.
10386 /// \param Approximate If \c true, return a likely range for the result: in
10387 /// particular, assume that arithmetic on narrower types doesn't leave
10388 /// those types. If \c false, return a range including all possible
10389 /// result values.
10390 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
10391 bool InConstantContext, bool Approximate) {
10392 E = E->IgnoreParens();
10393
10394 // Try a full evaluation first.
10395 Expr::EvalResult result;
10396 if (E->EvaluateAsRValue(result, C, InConstantContext))
10397 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);
10398
10399 // I think we only want to look through implicit casts here; if the
10400 // user has an explicit widening cast, we should treat the value as
10401 // being of the new, wider type.
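// Illustrative example (widths assume a typical target): an implicit
// promotion of a 'short' operand to 'int' keeps the 16-bit range of the
// 'short', while an explicit cast such as '(long)x' is treated as having
// the full range of 'long'.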
10402 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
10403 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
10404 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
10405 Approximate);
10406
10407 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));
10408
10409 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
10410 CE->getCastKind() == CK_BooleanToSignedIntegral;
10411
10412 // Assume that non-integer casts can span the full range of the type.
10413 if (!isIntegerCast)
10414 return OutputTypeRange;
10415
10416 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
10417 std::min(MaxWidth, OutputTypeRange.Width),
10418 InConstantContext, Approximate);
10419
10420 // Bail out if the subexpr's range is as wide as the cast type.
10421 if (SubRange.Width >= OutputTypeRange.Width)
10422 return OutputTypeRange;
10423
10424 // Otherwise, we take the smaller width, and we're non-negative if
10425 // either the output type or the subexpr is.
10426 return IntRange(SubRange.Width,
10427 SubRange.NonNegative || OutputTypeRange.NonNegative);
10428 }
10429
10430 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
10431 // If we can fold the condition, just take that operand.
10432 bool CondResult;
10433 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
10434 return GetExprRange(C,
10435 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
10436 MaxWidth, InConstantContext, Approximate);
10437
10438 // Otherwise, conservatively merge.
10439 // GetExprRange requires an integer expression, but a throw expression
10440 // results in a void type.
10441 Expr *E = CO->getTrueExpr();
10442 IntRange L = E->getType()->isVoidType()
10443 ? IntRange{0, true}
10444 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
10445 E = CO->getFalseExpr();
10446 IntRange R = E->getType()->isVoidType()
10447 ? IntRange{0, true}
10448 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
10449 return IntRange::join(L, R);
10450 }
10451
10452 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
10453 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
10454
10455 switch (BO->getOpcode()) {
10456 case BO_Cmp:
10457 llvm_unreachable("builtin <=> should have class type");
10458
10459 // Boolean-valued operations are single-bit and positive.
10460 case BO_LAnd:
10461 case BO_LOr:
10462 case BO_LT:
10463 case BO_GT:
10464 case BO_LE:
10465 case BO_GE:
10466 case BO_EQ:
10467 case BO_NE:
10468 return IntRange::forBoolType();
10469
10470 // The type of the assignments is the type of the LHS, so the RHS
10471 // is not necessarily the same type.
10472 case BO_MulAssign:
10473 case BO_DivAssign:
10474 case BO_RemAssign:
10475 case BO_AddAssign:
10476 case BO_SubAssign:
10477 case BO_XorAssign:
10478 case BO_OrAssign:
10479 // TODO: bitfields?
10480 return IntRange::forValueOfType(C, GetExprType(E));
10481
10482 // Simple assignments just pass through the RHS, which will have
10483 // been coerced to the LHS type.
10484 case BO_Assign:
10485 // TODO: bitfields?
10486 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
10487 Approximate);
10488
10489 // Operations with opaque sources are black-listed.
10490 case BO_PtrMemD:
10491 case BO_PtrMemI:
10492 return IntRange::forValueOfType(C, GetExprType(E));
10493
10494 // Bitwise-and uses the *infimum* of the two source ranges.
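// For instance, if either operand is known non-negative and at most 8 bits
// wide, the AND result is non-negative and needs at most 8 bits, no matter
// how wide the other operand is.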
10495 case BO_And: 10496 case BO_AndAssign: 10497 Combine = IntRange::bit_and; 10498 break; 10499 10500 // Left shift gets black-listed based on a judgement call. 10501 case BO_Shl: 10502 // ...except that we want to treat '1 << (blah)' as logically 10503 // positive. It's an important idiom. 10504 if (IntegerLiteral *I 10505 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10506 if (I->getValue() == 1) { 10507 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10508 return IntRange(R.Width, /*NonNegative*/ true); 10509 } 10510 } 10511 LLVM_FALLTHROUGH; 10512 10513 case BO_ShlAssign: 10514 return IntRange::forValueOfType(C, GetExprType(E)); 10515 10516 // Right shift by a constant can narrow its left argument. 10517 case BO_Shr: 10518 case BO_ShrAssign: { 10519 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 10520 Approximate); 10521 10522 // If the shift amount is a positive constant, drop the width by 10523 // that much. 10524 if (Optional<llvm::APSInt> shift = 10525 BO->getRHS()->getIntegerConstantExpr(C)) { 10526 if (shift->isNonNegative()) { 10527 unsigned zext = shift->getZExtValue(); 10528 if (zext >= L.Width) 10529 L.Width = (L.NonNegative ? 0 : 1); 10530 else 10531 L.Width -= zext; 10532 } 10533 } 10534 10535 return L; 10536 } 10537 10538 // Comma acts as its right operand. 10539 case BO_Comma: 10540 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 10541 Approximate); 10542 10543 case BO_Add: 10544 if (!Approximate) 10545 Combine = IntRange::sum; 10546 break; 10547 10548 case BO_Sub: 10549 if (BO->getLHS()->getType()->isPointerType()) 10550 return IntRange::forValueOfType(C, GetExprType(E)); 10551 if (!Approximate) 10552 Combine = IntRange::difference; 10553 break; 10554 10555 case BO_Mul: 10556 if (!Approximate) 10557 Combine = IntRange::product; 10558 break; 10559 10560 // The width of a division result is mostly determined by the size 10561 // of the LHS. 10562 case BO_Div: { 10563 // Don't 'pre-truncate' the operands. 10564 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10565 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 10566 Approximate); 10567 10568 // If the divisor is constant, use that. 10569 if (Optional<llvm::APSInt> divisor = 10570 BO->getRHS()->getIntegerConstantExpr(C)) { 10571 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 10572 if (log2 >= L.Width) 10573 L.Width = (L.NonNegative ? 0 : 1); 10574 else 10575 L.Width = std::min(L.Width - log2, MaxWidth); 10576 return L; 10577 } 10578 10579 // Otherwise, just use the LHS's width. 10580 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 10581 // could be -1. 10582 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 10583 Approximate); 10584 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10585 } 10586 10587 case BO_Rem: 10588 Combine = IntRange::rem; 10589 break; 10590 10591 // The default behavior is okay for these. 10592 case BO_Xor: 10593 case BO_Or: 10594 break; 10595 } 10596 10597 // Combine the two ranges, but limit the result to the type in which we 10598 // performed the computation. 
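// For example, 'short + short' is performed in 'int' after the usual
// promotions, so the operand ranges are computed at the width of 'int' and
// the combined width is then clamped to MaxWidth.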
10599 QualType T = GetExprType(E); 10600 unsigned opWidth = C.getIntWidth(T); 10601 IntRange L = 10602 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 10603 IntRange R = 10604 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 10605 IntRange C = Combine(L, R); 10606 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 10607 C.Width = std::min(C.Width, MaxWidth); 10608 return C; 10609 } 10610 10611 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10612 switch (UO->getOpcode()) { 10613 // Boolean-valued operations are white-listed. 10614 case UO_LNot: 10615 return IntRange::forBoolType(); 10616 10617 // Operations with opaque sources are black-listed. 10618 case UO_Deref: 10619 case UO_AddrOf: // should be impossible 10620 return IntRange::forValueOfType(C, GetExprType(E)); 10621 10622 default: 10623 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 10624 Approximate); 10625 } 10626 } 10627 10628 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10629 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 10630 Approximate); 10631 10632 if (const auto *BitField = E->getSourceBitField()) 10633 return IntRange(BitField->getBitWidthValue(C), 10634 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10635 10636 return IntRange::forValueOfType(C, GetExprType(E)); 10637 } 10638 10639 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10640 bool InConstantContext, bool Approximate) { 10641 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 10642 Approximate); 10643 } 10644 10645 /// Checks whether the given value, which currently has the given 10646 /// source semantics, has the same value when coerced through the 10647 /// target semantics. 10648 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10649 const llvm::fltSemantics &Src, 10650 const llvm::fltSemantics &Tgt) { 10651 llvm::APFloat truncated = value; 10652 10653 bool ignored; 10654 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10655 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10656 10657 return truncated.bitwiseIsEqual(value); 10658 } 10659 10660 /// Checks whether the given value, which currently has the given 10661 /// source semantics, has the same value when coerced through the 10662 /// target semantics. 10663 /// 10664 /// The value might be a vector of floats (or a complex number). 10665 static bool IsSameFloatAfterCast(const APValue &value, 10666 const llvm::fltSemantics &Src, 10667 const llvm::fltSemantics &Tgt) { 10668 if (value.isFloat()) 10669 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10670 10671 if (value.isVector()) { 10672 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10673 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10674 return false; 10675 return true; 10676 } 10677 10678 assert(value.isComplexFloat()); 10679 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10680 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10681 } 10682 10683 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 10684 bool IsListInit = false); 10685 10686 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10687 // Suppress cases where we are comparing against an enum constant. 
10688 if (const DeclRefExpr *DR = 10689 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10690 if (isa<EnumConstantDecl>(DR->getDecl())) 10691 return true; 10692 10693 // Suppress cases where the value is expanded from a macro, unless that macro 10694 // is how a language represents a boolean literal. This is the case in both C 10695 // and Objective-C. 10696 SourceLocation BeginLoc = E->getBeginLoc(); 10697 if (BeginLoc.isMacroID()) { 10698 StringRef MacroName = Lexer::getImmediateMacroName( 10699 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10700 return MacroName != "YES" && MacroName != "NO" && 10701 MacroName != "true" && MacroName != "false"; 10702 } 10703 10704 return false; 10705 } 10706 10707 static bool isKnownToHaveUnsignedValue(Expr *E) { 10708 return E->getType()->isIntegerType() && 10709 (!E->getType()->isSignedIntegerType() || 10710 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10711 } 10712 10713 namespace { 10714 /// The promoted range of values of a type. In general this has the 10715 /// following structure: 10716 /// 10717 /// |-----------| . . . |-----------| 10718 /// ^ ^ ^ ^ 10719 /// Min HoleMin HoleMax Max 10720 /// 10721 /// ... where there is only a hole if a signed type is promoted to unsigned 10722 /// (in which case Min and Max are the smallest and largest representable 10723 /// values). 10724 struct PromotedRange { 10725 // Min, or HoleMax if there is a hole. 10726 llvm::APSInt PromotedMin; 10727 // Max, or HoleMin if there is a hole. 10728 llvm::APSInt PromotedMax; 10729 10730 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10731 if (R.Width == 0) 10732 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10733 else if (R.Width >= BitWidth && !Unsigned) { 10734 // Promotion made the type *narrower*. This happens when promoting 10735 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10736 // Treat all values of 'signed int' as being in range for now. 10737 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10738 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10739 } else { 10740 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10741 .extOrTrunc(BitWidth); 10742 PromotedMin.setIsUnsigned(Unsigned); 10743 10744 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10745 .extOrTrunc(BitWidth); 10746 PromotedMax.setIsUnsigned(Unsigned); 10747 } 10748 } 10749 10750 // Determine whether this range is contiguous (has no hole). 10751 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10752 10753 // Where a constant value is within the range. 
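// Each result below is a bit-set of the primitive comparison outcomes that
// hold; e.g. a constant lying below PromotedMin compares '<', '<=', and '!='
// against every value in the range, hence Less = LE | LT | NE.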
10754 enum ComparisonResult { 10755 LT = 0x1, 10756 LE = 0x2, 10757 GT = 0x4, 10758 GE = 0x8, 10759 EQ = 0x10, 10760 NE = 0x20, 10761 InRangeFlag = 0x40, 10762 10763 Less = LE | LT | NE, 10764 Min = LE | InRangeFlag, 10765 InRange = InRangeFlag, 10766 Max = GE | InRangeFlag, 10767 Greater = GE | GT | NE, 10768 10769 OnlyValue = LE | GE | EQ | InRangeFlag, 10770 InHole = NE 10771 }; 10772 10773 ComparisonResult compare(const llvm::APSInt &Value) const { 10774 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10775 Value.isUnsigned() == PromotedMin.isUnsigned()); 10776 if (!isContiguous()) { 10777 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10778 if (Value.isMinValue()) return Min; 10779 if (Value.isMaxValue()) return Max; 10780 if (Value >= PromotedMin) return InRange; 10781 if (Value <= PromotedMax) return InRange; 10782 return InHole; 10783 } 10784 10785 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10786 case -1: return Less; 10787 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 10788 case 1: 10789 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10790 case -1: return InRange; 10791 case 0: return Max; 10792 case 1: return Greater; 10793 } 10794 } 10795 10796 llvm_unreachable("impossible compare result"); 10797 } 10798 10799 static llvm::Optional<StringRef> 10800 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10801 if (Op == BO_Cmp) { 10802 ComparisonResult LTFlag = LT, GTFlag = GT; 10803 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10804 10805 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10806 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10807 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10808 return llvm::None; 10809 } 10810 10811 ComparisonResult TrueFlag, FalseFlag; 10812 if (Op == BO_EQ) { 10813 TrueFlag = EQ; 10814 FalseFlag = NE; 10815 } else if (Op == BO_NE) { 10816 TrueFlag = NE; 10817 FalseFlag = EQ; 10818 } else { 10819 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10820 TrueFlag = LT; 10821 FalseFlag = GE; 10822 } else { 10823 TrueFlag = GT; 10824 FalseFlag = LE; 10825 } 10826 if (Op == BO_GE || Op == BO_LE) 10827 std::swap(TrueFlag, FalseFlag); 10828 } 10829 if (R & TrueFlag) 10830 return StringRef("true"); 10831 if (R & FalseFlag) 10832 return StringRef("false"); 10833 return llvm::None; 10834 } 10835 }; 10836 } 10837 10838 static bool HasEnumType(Expr *E) { 10839 // Strip off implicit integral promotions. 10840 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10841 if (ICE->getCastKind() != CK_IntegralCast && 10842 ICE->getCastKind() != CK_NoOp) 10843 break; 10844 E = ICE->getSubExpr(); 10845 } 10846 10847 return E->getType()->isEnumeralType(); 10848 } 10849 10850 static int classifyConstantValue(Expr *Constant) { 10851 // The values of this enumeration are used in the diagnostics 10852 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 10853 enum ConstantValueKind { 10854 Miscellaneous = 0, 10855 LiteralTrue, 10856 LiteralFalse 10857 }; 10858 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 10859 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
10860 : ConstantValueKind::LiteralFalse;
10861 return ConstantValueKind::Miscellaneous;
10862 }
10863
10864 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
10865 Expr *Constant, Expr *Other,
10866 const llvm::APSInt &Value,
10867 bool RhsConstant) {
10868 if (S.inTemplateInstantiation())
10869 return false;
10870
10871 Expr *OriginalOther = Other;
10872
10873 Constant = Constant->IgnoreParenImpCasts();
10874 Other = Other->IgnoreParenImpCasts();
10875
10876 // Suppress warnings on tautological comparisons between values of the same
10877 // enumeration type. There are only two ways we could warn on this:
10878 // - If the constant is outside the range of representable values of
10879 // the enumeration. In such a case, we should warn about the cast
10880 // to enumeration type, not about the comparison.
10881 // - If the constant is the maximum / minimum in-range value. For an
10882 // enumeration type, such comparisons can be meaningful and useful.
10883 if (Constant->getType()->isEnumeralType() &&
10884 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
10885 return false;
10886
10887 IntRange OtherValueRange = GetExprRange(
10888 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
10889
10890 QualType OtherT = Other->getType();
10891 if (const auto *AT = OtherT->getAs<AtomicType>())
10892 OtherT = AT->getValueType();
10893 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
10894
10895 // Special case for ObjC BOOL on targets where it's a typedef for a signed char
10896 // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
10897 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
10898 S.NSAPIObj->isObjCBOOLType(OtherT) &&
10899 OtherT->isSpecificBuiltinType(BuiltinType::SChar);
10900
10901 // Whether we're treating Other as being a bool because of the form of
10902 // expression despite it having another type (typically 'int' in C).
10903 bool OtherIsBooleanDespiteType =
10904 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
10905 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
10906 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
10907
10908 // Check if all values in the range of possible values of this expression
10909 // lead to the same comparison outcome.
10910 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
10911 Value.isUnsigned());
10912 auto Cmp = OtherPromotedValueRange.compare(Value);
10913 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
10914 if (!Result)
10915 return false;
10916
10917 // Also consider the range determined by the type alone. This allows us to
10918 // classify the warning under the proper diagnostic group.
10919 bool TautologicalTypeCompare = false;
10920 {
10921 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
10922 Value.isUnsigned());
10923 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
10924 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
10925 RhsConstant)) {
10926 TautologicalTypeCompare = true;
10927 Cmp = TypeCmp;
10928 Result = TypeResult;
10929 }
10930 }
10931
10932 // Don't warn if the non-constant operand actually always evaluates to the
10933 // same value.
10934 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
10935 return false;
10936
10937 // Suppress the diagnostic for an in-range comparison if the constant comes
10938 // from a macro or enumerator.
We don't want to diagnose 10939 // 10940 // some_long_value <= INT_MAX 10941 // 10942 // when sizeof(int) == sizeof(long). 10943 bool InRange = Cmp & PromotedRange::InRangeFlag; 10944 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 10945 return false; 10946 10947 // A comparison of an unsigned bit-field against 0 is really a type problem, 10948 // even though at the type level the bit-field might promote to 'signed int'. 10949 if (Other->refersToBitField() && InRange && Value == 0 && 10950 Other->getType()->isUnsignedIntegerOrEnumerationType()) 10951 TautologicalTypeCompare = true; 10952 10953 // If this is a comparison to an enum constant, include that 10954 // constant in the diagnostic. 10955 const EnumConstantDecl *ED = nullptr; 10956 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10957 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10958 10959 // Should be enough for uint128 (39 decimal digits) 10960 SmallString<64> PrettySourceValue; 10961 llvm::raw_svector_ostream OS(PrettySourceValue); 10962 if (ED) { 10963 OS << '\'' << *ED << "' (" << Value << ")"; 10964 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 10965 Constant->IgnoreParenImpCasts())) { 10966 OS << (BL->getValue() ? "YES" : "NO"); 10967 } else { 10968 OS << Value; 10969 } 10970 10971 if (!TautologicalTypeCompare) { 10972 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 10973 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 10974 << E->getOpcodeStr() << OS.str() << *Result 10975 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10976 return true; 10977 } 10978 10979 if (IsObjCSignedCharBool) { 10980 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10981 S.PDiag(diag::warn_tautological_compare_objc_bool) 10982 << OS.str() << *Result); 10983 return true; 10984 } 10985 10986 // FIXME: We use a somewhat different formatting for the in-range cases and 10987 // cases involving boolean values for historical reasons. We should pick a 10988 // consistent way of presenting these diagnostics. 10989 if (!InRange || Other->isKnownToHaveBooleanValue()) { 10990 10991 S.DiagRuntimeBehavior( 10992 E->getOperatorLoc(), E, 10993 S.PDiag(!InRange ? diag::warn_out_of_range_compare 10994 : diag::warn_tautological_bool_compare) 10995 << OS.str() << classifyConstantValue(Constant) << OtherT 10996 << OtherIsBooleanDespiteType << *Result 10997 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 10998 } else { 10999 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 11000 ? (HasEnumType(OriginalOther) 11001 ? diag::warn_unsigned_enum_always_true_comparison 11002 : diag::warn_unsigned_always_true_comparison) 11003 : diag::warn_tautological_constant_compare; 11004 11005 S.Diag(E->getOperatorLoc(), Diag) 11006 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 11007 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11008 } 11009 11010 return true; 11011 } 11012 11013 /// Analyze the operands of the given comparison. Implements the 11014 /// fallback case from AnalyzeComparison. 11015 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 11016 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11017 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11018 } 11019 11020 /// Implements -Wsign-compare. 
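///
/// For instance (illustrative), comparing 'int i' against 'unsigned u' with
/// 'i < u' can warn, because a negative 'i' is converted to a large unsigned
/// value before the comparison.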
11021 /// 11022 /// \param E the binary operator to check for warnings 11023 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 11024 // The type the comparison is being performed in. 11025 QualType T = E->getLHS()->getType(); 11026 11027 // Only analyze comparison operators where both sides have been converted to 11028 // the same type. 11029 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 11030 return AnalyzeImpConvsInComparison(S, E); 11031 11032 // Don't analyze value-dependent comparisons directly. 11033 if (E->isValueDependent()) 11034 return AnalyzeImpConvsInComparison(S, E); 11035 11036 Expr *LHS = E->getLHS(); 11037 Expr *RHS = E->getRHS(); 11038 11039 if (T->isIntegralType(S.Context)) { 11040 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 11041 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 11042 11043 // We don't care about expressions whose result is a constant. 11044 if (RHSValue && LHSValue) 11045 return AnalyzeImpConvsInComparison(S, E); 11046 11047 // We only care about expressions where just one side is literal 11048 if ((bool)RHSValue ^ (bool)LHSValue) { 11049 // Is the constant on the RHS or LHS? 11050 const bool RhsConstant = (bool)RHSValue; 11051 Expr *Const = RhsConstant ? RHS : LHS; 11052 Expr *Other = RhsConstant ? LHS : RHS; 11053 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 11054 11055 // Check whether an integer constant comparison results in a value 11056 // of 'true' or 'false'. 11057 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 11058 return AnalyzeImpConvsInComparison(S, E); 11059 } 11060 } 11061 11062 if (!T->hasUnsignedIntegerRepresentation()) { 11063 // We don't do anything special if this isn't an unsigned integral 11064 // comparison: we're only interested in integral comparisons, and 11065 // signed comparisons only happen in cases we don't care to warn about. 11066 return AnalyzeImpConvsInComparison(S, E); 11067 } 11068 11069 LHS = LHS->IgnoreParenImpCasts(); 11070 RHS = RHS->IgnoreParenImpCasts(); 11071 11072 if (!S.getLangOpts().CPlusPlus) { 11073 // Avoid warning about comparison of integers with different signs when 11074 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 11075 // the type of `E`. 11076 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 11077 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11078 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 11079 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11080 } 11081 11082 // Check to see if one of the (unmodified) operands is of different 11083 // signedness. 11084 Expr *signedOperand, *unsignedOperand; 11085 if (LHS->getType()->hasSignedIntegerRepresentation()) { 11086 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 11087 "unsigned comparison between two signed integer expressions?"); 11088 signedOperand = LHS; 11089 unsignedOperand = RHS; 11090 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 11091 signedOperand = RHS; 11092 unsignedOperand = LHS; 11093 } else { 11094 return AnalyzeImpConvsInComparison(S, E); 11095 } 11096 11097 // Otherwise, calculate the effective range of the signed operand. 11098 IntRange signedRange = GetExprRange( 11099 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 11100 11101 // Go ahead and analyze implicit conversions in the operands. Note 11102 // that we skip the implicit conversions on both sides. 
11103 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
11104 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
11105
11106 // If the signed range is non-negative, -Wsign-compare won't fire.
11107 if (signedRange.NonNegative)
11108 return;
11109
11110 // For (in)equality comparisons, if the unsigned operand is a
11111 // constant which cannot collide with an overflowed signed operand,
11112 // then reinterpreting the signed operand as unsigned will not
11113 // change the result of the comparison.
11114 if (E->isEqualityOp()) {
11115 unsigned comparisonWidth = S.Context.getIntWidth(T);
11116 IntRange unsignedRange =
11117 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
11118 /*Approximate*/ true);
11119
11120 // We should never be unable to prove that the unsigned operand is
11121 // non-negative.
11122 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
11123
11124 if (unsignedRange.Width < comparisonWidth)
11125 return;
11126 }
11127
11128 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
11129 S.PDiag(diag::warn_mixed_sign_comparison)
11130 << LHS->getType() << RHS->getType()
11131 << LHS->getSourceRange() << RHS->getSourceRange());
11132 }
11133
11134 /// Analyzes an attempt to assign the given value to a bitfield.
11135 ///
11136 /// Returns true if there was something fishy about the attempt.
11137 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
11138 SourceLocation InitLoc) {
11139 assert(Bitfield->isBitField());
11140 if (Bitfield->isInvalidDecl())
11141 return false;
11142
11143 // White-list bool bitfields.
11144 QualType BitfieldType = Bitfield->getType();
11145 if (BitfieldType->isBooleanType())
11146 return false;
11147
11148 if (BitfieldType->isEnumeralType()) {
11149 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
11150 // If the underlying enum type was not explicitly specified as an unsigned
11151 // type and the enum contains only positive values, MSVC++ will cause an
11152 // inconsistency by storing this as a signed type.
11153 if (S.getLangOpts().CPlusPlus11 &&
11154 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
11155 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
11156 BitfieldEnumDecl->getNumNegativeBits() == 0) {
11157 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
11158 << BitfieldEnumDecl;
11159 }
11160 }
11161
11162 if (Bitfield->getType()->isBooleanType())
11163 return false;
11164
11165 // Ignore value- or type-dependent expressions.
11166 if (Bitfield->getBitWidth()->isValueDependent() ||
11167 Bitfield->getBitWidth()->isTypeDependent() ||
11168 Init->isValueDependent() ||
11169 Init->isTypeDependent())
11170 return false;
11171
11172 Expr *OriginalInit = Init->IgnoreParenImpCasts();
11173 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
11174
11175 Expr::EvalResult Result;
11176 if (!OriginalInit->EvaluateAsInt(Result, S.Context,
11177 Expr::SE_AllowSideEffects)) {
11178 // The RHS is not constant. If the RHS has an enum type, make sure the
11179 // bitfield is wide enough to hold all the values of the enum without
11180 // truncation.
11181 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
11182 EnumDecl *ED = EnumTy->getDecl();
11183 bool SignedBitfield = BitfieldType->isSignedIntegerType();
11184
11185 // Enum types are implicitly signed on Windows, so check if there are any
11186 // negative enumerators to see if the enum was intended to be signed or
11187 // not.
11188 bool SignedEnum = ED->getNumNegativeBits() > 0; 11189 11190 // Check for surprising sign changes when assigning enum values to a 11191 // bitfield of different signedness. If the bitfield is signed and we 11192 // have exactly the right number of bits to store this unsigned enum, 11193 // suggest changing the enum to an unsigned type. This typically happens 11194 // on Windows where unfixed enums always use an underlying type of 'int'. 11195 unsigned DiagID = 0; 11196 if (SignedEnum && !SignedBitfield) { 11197 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 11198 } else if (SignedBitfield && !SignedEnum && 11199 ED->getNumPositiveBits() == FieldWidth) { 11200 DiagID = diag::warn_signed_bitfield_enum_conversion; 11201 } 11202 11203 if (DiagID) { 11204 S.Diag(InitLoc, DiagID) << Bitfield << ED; 11205 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 11206 SourceRange TypeRange = 11207 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 11208 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 11209 << SignedEnum << TypeRange; 11210 } 11211 11212 // Compute the required bitwidth. If the enum has negative values, we need 11213 // one more bit than the normal number of positive bits to represent the 11214 // sign bit. 11215 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 11216 ED->getNumNegativeBits()) 11217 : ED->getNumPositiveBits(); 11218 11219 // Check the bitwidth. 11220 if (BitsNeeded > FieldWidth) { 11221 Expr *WidthExpr = Bitfield->getBitWidth(); 11222 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 11223 << Bitfield << ED; 11224 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 11225 << BitsNeeded << ED << WidthExpr->getSourceRange(); 11226 } 11227 } 11228 11229 return false; 11230 } 11231 11232 llvm::APSInt Value = Result.Val.getInt(); 11233 11234 unsigned OriginalWidth = Value.getBitWidth(); 11235 11236 if (!Value.isSigned() || Value.isNegative()) 11237 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 11238 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 11239 OriginalWidth = Value.getMinSignedBits(); 11240 11241 if (OriginalWidth <= FieldWidth) 11242 return false; 11243 11244 // Compute the value which the bitfield will contain. 11245 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 11246 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 11247 11248 // Check whether the stored value is equal to the original value. 11249 TruncatedValue = TruncatedValue.extend(OriginalWidth); 11250 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 11251 return false; 11252 11253 // Special-case bitfields of width 1: booleans are naturally 0/1, and 11254 // therefore don't strictly fit into a signed bitfield of width 1. 11255 if (FieldWidth == 1 && Value == 1) 11256 return false; 11257 11258 std::string PrettyValue = Value.toString(10); 11259 std::string PrettyTrunc = TruncatedValue.toString(10); 11260 11261 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 11262 << PrettyValue << PrettyTrunc << OriginalInit->getType() 11263 << Init->getSourceRange(); 11264 11265 return true; 11266 } 11267 11268 /// Analyze the given simple or compound assignment for warning-worthy 11269 /// operations. 11270 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 11271 // Just recurse on the LHS. 
11272 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11273 11274 // We want to recurse on the RHS as normal unless we're assigning to 11275 // a bitfield. 11276 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 11277 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 11278 E->getOperatorLoc())) { 11279 // Recurse, ignoring any implicit conversions on the RHS. 11280 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 11281 E->getOperatorLoc()); 11282 } 11283 } 11284 11285 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11286 11287 // Diagnose implicitly sequentially-consistent atomic assignment. 11288 if (E->getLHS()->getType()->isAtomicType()) 11289 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11290 } 11291 11292 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11293 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 11294 SourceLocation CContext, unsigned diag, 11295 bool pruneControlFlow = false) { 11296 if (pruneControlFlow) { 11297 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11298 S.PDiag(diag) 11299 << SourceType << T << E->getSourceRange() 11300 << SourceRange(CContext)); 11301 return; 11302 } 11303 S.Diag(E->getExprLoc(), diag) 11304 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 11305 } 11306 11307 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11308 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 11309 SourceLocation CContext, 11310 unsigned diag, bool pruneControlFlow = false) { 11311 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 11312 } 11313 11314 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11315 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11316 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11317 } 11318 11319 static void adornObjCBoolConversionDiagWithTernaryFixit( 11320 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 11321 Expr *Ignored = SourceExpr->IgnoreImplicit(); 11322 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 11323 Ignored = OVE->getSourceExpr(); 11324 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11325 isa<BinaryOperator>(Ignored) || 11326 isa<CXXOperatorCallExpr>(Ignored); 11327 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 11328 if (NeedsParens) 11329 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 11330 << FixItHint::CreateInsertion(EndLoc, ")"); 11331 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 11332 } 11333 11334 /// Diagnose an implicit cast from a floating point value to an integer value. 
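///
/// Illustrative examples: 'int i = 0.5;' is diagnosed because the value
/// changes to 0 when truncated, while 'int i = 2.0;' converts exactly from a
/// literal and is not diagnosed.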
11335 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 11336 SourceLocation CContext) { 11337 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 11338 const bool PruneWarnings = S.inTemplateInstantiation(); 11339 11340 Expr *InnerE = E->IgnoreParenImpCasts(); 11341 // We also want to warn on, e.g., "int i = -1.234" 11342 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 11343 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 11344 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 11345 11346 const bool IsLiteral = 11347 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 11348 11349 llvm::APFloat Value(0.0); 11350 bool IsConstant = 11351 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 11352 if (!IsConstant) { 11353 if (isObjCSignedCharBool(S, T)) { 11354 return adornObjCBoolConversionDiagWithTernaryFixit( 11355 S, E, 11356 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 11357 << E->getType()); 11358 } 11359 11360 return DiagnoseImpCast(S, E, T, CContext, 11361 diag::warn_impcast_float_integer, PruneWarnings); 11362 } 11363 11364 bool isExact = false; 11365 11366 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 11367 T->hasUnsignedIntegerRepresentation()); 11368 llvm::APFloat::opStatus Result = Value.convertToInteger( 11369 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 11370 11371 // FIXME: Force the precision of the source value down so we don't print 11372 // digits which are usually useless (we don't really care here if we 11373 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 11374 // would automatically print the shortest representation, but it's a bit 11375 // tricky to implement. 11376 SmallString<16> PrettySourceValue; 11377 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 11378 precision = (precision * 59 + 195) / 196; 11379 Value.toString(PrettySourceValue, precision); 11380 11381 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 11382 return adornObjCBoolConversionDiagWithTernaryFixit( 11383 S, E, 11384 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 11385 << PrettySourceValue); 11386 } 11387 11388 if (Result == llvm::APFloat::opOK && isExact) { 11389 if (IsLiteral) return; 11390 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 11391 PruneWarnings); 11392 } 11393 11394 // Conversion of a floating-point value to a non-bool integer where the 11395 // integral part cannot be represented by the integer type is undefined. 11396 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 11397 return DiagnoseImpCast( 11398 S, E, T, CContext, 11399 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 11400 : diag::warn_impcast_float_to_integer_out_of_range, 11401 PruneWarnings); 11402 11403 unsigned DiagID = 0; 11404 if (IsLiteral) { 11405 // Warn on floating point literal to integer. 11406 DiagID = diag::warn_impcast_literal_float_to_integer; 11407 } else if (IntegerValue == 0) { 11408 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11409 return DiagnoseImpCast(S, E, T, CContext, 11410 diag::warn_impcast_float_integer, PruneWarnings); 11411 } 11412 // Warn on non-zero to zero conversion. 
11413 DiagID = diag::warn_impcast_float_to_integer_zero; 11414 } else { 11415 if (IntegerValue.isUnsigned()) { 11416 if (!IntegerValue.isMaxValue()) { 11417 return DiagnoseImpCast(S, E, T, CContext, 11418 diag::warn_impcast_float_integer, PruneWarnings); 11419 } 11420 } else { // IntegerValue.isSigned() 11421 if (!IntegerValue.isMaxSignedValue() && 11422 !IntegerValue.isMinSignedValue()) { 11423 return DiagnoseImpCast(S, E, T, CContext, 11424 diag::warn_impcast_float_integer, PruneWarnings); 11425 } 11426 } 11427 // Warn on evaluatable floating point expression to integer conversion. 11428 DiagID = diag::warn_impcast_float_to_integer; 11429 } 11430 11431 SmallString<16> PrettyTargetValue; 11432 if (IsBool) 11433 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11434 else 11435 IntegerValue.toString(PrettyTargetValue); 11436 11437 if (PruneWarnings) { 11438 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11439 S.PDiag(DiagID) 11440 << E->getType() << T.getUnqualifiedType() 11441 << PrettySourceValue << PrettyTargetValue 11442 << E->getSourceRange() << SourceRange(CContext)); 11443 } else { 11444 S.Diag(E->getExprLoc(), DiagID) 11445 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11446 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11447 } 11448 } 11449 11450 /// Analyze the given compound assignment for the possible losing of 11451 /// floating-point precision. 11452 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11453 assert(isa<CompoundAssignOperator>(E) && 11454 "Must be compound assignment operation"); 11455 // Recurse on the LHS and RHS in here 11456 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11457 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11458 11459 if (E->getLHS()->getType()->isAtomicType()) 11460 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11461 11462 // Now check the outermost expression 11463 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11464 const auto *RBT = cast<CompoundAssignOperator>(E) 11465 ->getComputationResultType() 11466 ->getAs<BuiltinType>(); 11467 11468 // The below checks assume source is floating point. 11469 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11470 11471 // If source is floating point but target is an integer. 11472 if (ResultBT->isInteger()) 11473 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11474 E->getExprLoc(), diag::warn_impcast_float_integer); 11475 11476 if (!ResultBT->isFloatingPoint()) 11477 return; 11478 11479 // If both source and target are floating points, warn about losing precision. 11480 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11481 QualType(ResultBT, 0), QualType(RBT, 0)); 11482 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11483 // warn about dropping FP rank. 
11484 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11485 diag::warn_impcast_float_result_precision); 11486 } 11487 11488 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11489 IntRange Range) { 11490 if (!Range.Width) return "0"; 11491 11492 llvm::APSInt ValueInRange = Value; 11493 ValueInRange.setIsSigned(!Range.NonNegative); 11494 ValueInRange = ValueInRange.trunc(Range.Width); 11495 return ValueInRange.toString(10); 11496 } 11497 11498 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11499 if (!isa<ImplicitCastExpr>(Ex)) 11500 return false; 11501 11502 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11503 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11504 const Type *Source = 11505 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11506 if (Target->isDependentType()) 11507 return false; 11508 11509 const BuiltinType *FloatCandidateBT = 11510 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11511 const Type *BoolCandidateType = ToBool ? Target : Source; 11512 11513 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11514 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11515 } 11516 11517 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 11518 SourceLocation CC) { 11519 unsigned NumArgs = TheCall->getNumArgs(); 11520 for (unsigned i = 0; i < NumArgs; ++i) { 11521 Expr *CurrA = TheCall->getArg(i); 11522 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 11523 continue; 11524 11525 bool IsSwapped = ((i > 0) && 11526 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 11527 IsSwapped |= ((i < (NumArgs - 1)) && 11528 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 11529 if (IsSwapped) { 11530 // Warn on this floating-point to bool conversion. 11531 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 11532 CurrA->getType(), CC, 11533 diag::warn_impcast_floating_point_to_bool); 11534 } 11535 } 11536 } 11537 11538 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 11539 SourceLocation CC) { 11540 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 11541 E->getExprLoc())) 11542 return; 11543 11544 // Don't warn on functions which have return type nullptr_t. 11545 if (isa<CallExpr>(E)) 11546 return; 11547 11548 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 11549 const Expr::NullPointerConstantKind NullKind = 11550 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 11551 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 11552 return; 11553 11554 // Return if target type is a safe conversion. 11555 if (T->isAnyPointerType() || T->isBlockPointerType() || 11556 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11557 return; 11558 11559 SourceLocation Loc = E->getSourceRange().getBegin(); 11560 11561 // Venture through the macro stacks to get to the source of macro arguments. 11562 // The new location is a better location than the complete location that was 11563 // passed in. 11564 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11565 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11566 11567 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
11568 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11569 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11570 Loc, S.SourceMgr, S.getLangOpts()); 11571 if (MacroName == "NULL") 11572 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11573 } 11574 11575 // Only warn if the null and context location are in the same macro expansion. 11576 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11577 return; 11578 11579 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11580 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11581 << FixItHint::CreateReplacement(Loc, 11582 S.getFixItZeroLiteralForType(T, Loc)); 11583 } 11584 11585 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11586 ObjCArrayLiteral *ArrayLiteral); 11587 11588 static void 11589 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11590 ObjCDictionaryLiteral *DictionaryLiteral); 11591 11592 /// Check a single element within a collection literal against the 11593 /// target element type. 11594 static void checkObjCCollectionLiteralElement(Sema &S, 11595 QualType TargetElementType, 11596 Expr *Element, 11597 unsigned ElementKind) { 11598 // Skip a bitcast to 'id' or qualified 'id'. 11599 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11600 if (ICE->getCastKind() == CK_BitCast && 11601 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11602 Element = ICE->getSubExpr(); 11603 } 11604 11605 QualType ElementType = Element->getType(); 11606 ExprResult ElementResult(Element); 11607 if (ElementType->getAs<ObjCObjectPointerType>() && 11608 S.CheckSingleAssignmentConstraints(TargetElementType, 11609 ElementResult, 11610 false, false) 11611 != Sema::Compatible) { 11612 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11613 << ElementType << ElementKind << TargetElementType 11614 << Element->getSourceRange(); 11615 } 11616 11617 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11618 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11619 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11620 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11621 } 11622 11623 /// Check an Objective-C array literal being converted to the given 11624 /// target type. 11625 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11626 ObjCArrayLiteral *ArrayLiteral) { 11627 if (!S.NSArrayDecl) 11628 return; 11629 11630 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11631 if (!TargetObjCPtr) 11632 return; 11633 11634 if (TargetObjCPtr->isUnspecialized() || 11635 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11636 != S.NSArrayDecl->getCanonicalDecl()) 11637 return; 11638 11639 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11640 if (TypeArgs.size() != 1) 11641 return; 11642 11643 QualType TargetElementType = TypeArgs[0]; 11644 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11645 checkObjCCollectionLiteralElement(S, TargetElementType, 11646 ArrayLiteral->getElement(I), 11647 0); 11648 } 11649 } 11650 11651 /// Check an Objective-C dictionary literal being converted to the given 11652 /// target type. 
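///
/// For a target such as 'NSDictionary<NSString *, NSNumber *> *' (example
/// only), each key in the literal is checked against NSString * and each
/// value against NSNumber *.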
11653 static void
11654 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
11655 ObjCDictionaryLiteral *DictionaryLiteral) {
11656 if (!S.NSDictionaryDecl)
11657 return;
11658
11659 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
11660 if (!TargetObjCPtr)
11661 return;
11662
11663 if (TargetObjCPtr->isUnspecialized() ||
11664 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
11665 != S.NSDictionaryDecl->getCanonicalDecl())
11666 return;
11667
11668 auto TypeArgs = TargetObjCPtr->getTypeArgs();
11669 if (TypeArgs.size() != 2)
11670 return;
11671
11672 QualType TargetKeyType = TypeArgs[0];
11673 QualType TargetObjectType = TypeArgs[1];
11674 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
11675 auto Element = DictionaryLiteral->getKeyValueElement(I);
11676 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
11677 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
11678 }
11679 }
11680
11681 // Helper function to filter out cases for constant width constant conversion.
11682 // Don't warn on char array initialization or for non-decimal values.
11683 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
11684 SourceLocation CC) {
11685 // If initializing from a constant, and the constant starts with '0',
11686 // then it is a binary, octal, or hexadecimal. Allow these constants
11687 // to fill all the bits, even if there is a sign change.
11688 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
11689 const char FirstLiteralCharacter =
11690 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
11691 if (FirstLiteralCharacter == '0')
11692 return false;
11693 }
11694
11695 // If the CC location points to a '{', and the type is char, then assume
11696 // it is an array initialization.
11697 if (CC.isValid() && T->isCharType()) {
11698 const char FirstContextCharacter =
11699 S.getSourceManager().getCharacterData(CC)[0];
11700 if (FirstContextCharacter == '{')
11701 return false;
11702 }
11703
11704 return true;
11705 }
11706
11707 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
11708 const auto *IL = dyn_cast<IntegerLiteral>(E);
11709 if (!IL) {
11710 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
11711 if (UO->getOpcode() == UO_Minus)
11712 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
11713 }
11714 }
11715
11716 return IL;
11717 }
11718
11719 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
11720 E = E->IgnoreParenImpCasts();
11721 SourceLocation ExprLoc = E->getExprLoc();
11722
11723 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
11724 BinaryOperator::Opcode Opc = BO->getOpcode();
11725 Expr::EvalResult Result;
11726 // Do not diagnose unsigned shifts.
11727 if (Opc == BO_Shl) {
11728 const auto *LHS = getIntegerLiteral(BO->getLHS());
11729 const auto *RHS = getIntegerLiteral(BO->getRHS());
11730 if (LHS && LHS->getValue() == 0)
11731 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
11732 else if (!E->isValueDependent() && LHS && RHS &&
11733 RHS->getValue().isNonNegative() &&
11734 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
11735 S.Diag(ExprLoc, diag::warn_left_shift_always)
11736 << (Result.Val.getInt() != 0);
11737 else if (E->getType()->isSignedIntegerType())
11738 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
11739 }
11740 }
11741
11742 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
11743 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
11744 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
11745 if (!LHS || !RHS)
11746 return;
11747 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
11748 (RHS->getValue() == 0 || RHS->getValue() == 1))
11749 // Do not diagnose common idioms.
11750 return;
11751 if (LHS->getValue() != 0 && RHS->getValue() != 0)
11752 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
11753 }
11754 }
11755
11756 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
11757 SourceLocation CC,
11758 bool *ICContext = nullptr,
11759 bool IsListInit = false) {
11760 if (E->isTypeDependent() || E->isValueDependent()) return;
11761
11762 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
11763 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
11764 if (Source == Target) return;
11765 if (Target->isDependentType()) return;
11766
11767 // If the conversion context location is invalid, don't complain. We also
11768 // don't want to emit a warning if the issue occurs from the expansion of
11769 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
11770 // delay this check as long as possible. Once we detect we are in that
11771 // scenario, we just return.
11772 if (CC.isInvalid())
11773 return;
11774
11775 if (Source->isAtomicType())
11776 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
11777
11778 // Diagnose implicit casts to bool.
11779 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
11780 if (isa<StringLiteral>(E))
11781 // Warn on string literal to bool. Checks for string literals in logical
11782 // and expressions, for instance, assert(0 && "error here"), are
11783 // prevented by a check in AnalyzeImplicitConversions().
11784 return DiagnoseImpCast(S, E, T, CC,
11785 diag::warn_impcast_string_literal_to_bool);
11786 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
11787 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
11788 // This covers the literal expressions that evaluate to Objective-C
11789 // objects.
11790 return DiagnoseImpCast(S, E, T, CC,
11791 diag::warn_impcast_objective_c_literal_to_bool);
11792 }
11793 if (Source->isPointerType() || Source->canDecayToPointerType()) {
11794 // Warn on pointer to bool conversion that is always true.
11795 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
11796 SourceRange(CC));
11797 }
11798 }
11799
11800 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
11801 // is a typedef for signed char (macOS), then that constant value has to be 1
11802 // or 0.
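// For example, 'BOOL b = 2;' is diagnosed here and offered a "? YES : NO"
// fix-it, while 'BOOL b = 1;' is not diagnosed by this check.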
11803 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 11804 Expr::EvalResult Result; 11805 if (E->EvaluateAsInt(Result, S.getASTContext(), 11806 Expr::SE_AllowSideEffects)) { 11807 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 11808 adornObjCBoolConversionDiagWithTernaryFixit( 11809 S, E, 11810 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 11811 << Result.Val.getInt().toString(10)); 11812 } 11813 return; 11814 } 11815 } 11816 11817 // Check implicit casts from Objective-C collection literals to specialized 11818 // collection types, e.g., NSArray<NSString *> *. 11819 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 11820 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 11821 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 11822 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 11823 11824 // Strip vector types. 11825 if (isa<VectorType>(Source)) { 11826 if (!isa<VectorType>(Target)) { 11827 if (S.SourceMgr.isInSystemMacro(CC)) 11828 return; 11829 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 11830 } 11831 11832 // If the vector cast is cast between two vectors of the same size, it is 11833 // a bitcast, not a conversion. 11834 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 11835 return; 11836 11837 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 11838 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 11839 } 11840 if (auto VecTy = dyn_cast<VectorType>(Target)) 11841 Target = VecTy->getElementType().getTypePtr(); 11842 11843 // Strip complex types. 11844 if (isa<ComplexType>(Source)) { 11845 if (!isa<ComplexType>(Target)) { 11846 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 11847 return; 11848 11849 return DiagnoseImpCast(S, E, T, CC, 11850 S.getLangOpts().CPlusPlus 11851 ? diag::err_impcast_complex_scalar 11852 : diag::warn_impcast_complex_scalar); 11853 } 11854 11855 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 11856 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 11857 } 11858 11859 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 11860 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 11861 11862 // If the source is floating point... 11863 if (SourceBT && SourceBT->isFloatingPoint()) { 11864 // ...and the target is floating point... 11865 if (TargetBT && TargetBT->isFloatingPoint()) { 11866 // ...then warn if we're dropping FP rank. 11867 11868 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11869 QualType(SourceBT, 0), QualType(TargetBT, 0)); 11870 if (Order > 0) { 11871 // Don't warn about float constants that are precisely 11872 // representable in the target type. 11873 Expr::EvalResult result; 11874 if (E->EvaluateAsRValue(result, S.Context)) { 11875 // Value might be a float, a float vector, or a float complex. 11876 if (IsSameFloatAfterCast(result.Val, 11877 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 11878 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 11879 return; 11880 } 11881 11882 if (S.SourceMgr.isInSystemMacro(CC)) 11883 return; 11884 11885 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 11886 } 11887 // ... 
or possibly if we're increasing rank, too 11888 else if (Order < 0) { 11889 if (S.SourceMgr.isInSystemMacro(CC)) 11890 return; 11891 11892 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 11893 } 11894 return; 11895 } 11896 11897 // If the target is integral, always warn. 11898 if (TargetBT && TargetBT->isInteger()) { 11899 if (S.SourceMgr.isInSystemMacro(CC)) 11900 return; 11901 11902 DiagnoseFloatingImpCast(S, E, T, CC); 11903 } 11904 11905 // Detect the case where a call result is converted from floating-point to 11906 // to bool, and the final argument to the call is converted from bool, to 11907 // discover this typo: 11908 // 11909 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 11910 // 11911 // FIXME: This is an incredibly special case; is there some more general 11912 // way to detect this class of misplaced-parentheses bug? 11913 if (Target->isBooleanType() && isa<CallExpr>(E)) { 11914 // Check last argument of function call to see if it is an 11915 // implicit cast from a type matching the type the result 11916 // is being cast to. 11917 CallExpr *CEx = cast<CallExpr>(E); 11918 if (unsigned NumArgs = CEx->getNumArgs()) { 11919 Expr *LastA = CEx->getArg(NumArgs - 1); 11920 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11921 if (isa<ImplicitCastExpr>(LastA) && 11922 InnerE->getType()->isBooleanType()) { 11923 // Warn on this floating-point to bool conversion 11924 DiagnoseImpCast(S, E, T, CC, 11925 diag::warn_impcast_floating_point_to_bool); 11926 } 11927 } 11928 } 11929 return; 11930 } 11931 11932 // Valid casts involving fixed point types should be accounted for here. 11933 if (Source->isFixedPointType()) { 11934 if (Target->isUnsaturatedFixedPointType()) { 11935 Expr::EvalResult Result; 11936 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11937 S.isConstantEvaluated())) { 11938 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 11939 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11940 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11941 if (Value > MaxVal || Value < MinVal) { 11942 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11943 S.PDiag(diag::warn_impcast_fixed_point_range) 11944 << Value.toString() << T 11945 << E->getSourceRange() 11946 << clang::SourceRange(CC)); 11947 return; 11948 } 11949 } 11950 } else if (Target->isIntegerType()) { 11951 Expr::EvalResult Result; 11952 if (!S.isConstantEvaluated() && 11953 E->EvaluateAsFixedPoint(Result, S.Context, 11954 Expr::SE_AllowSideEffects)) { 11955 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 11956 11957 bool Overflowed; 11958 llvm::APSInt IntResult = FXResult.convertToInt( 11959 S.Context.getIntWidth(T), 11960 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11961 11962 if (Overflowed) { 11963 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11964 S.PDiag(diag::warn_impcast_fixed_point_range) 11965 << FXResult.toString() << T 11966 << E->getSourceRange() 11967 << clang::SourceRange(CC)); 11968 return; 11969 } 11970 } 11971 } 11972 } else if (Target->isUnsaturatedFixedPointType()) { 11973 if (Source->isIntegerType()) { 11974 Expr::EvalResult Result; 11975 if (!S.isConstantEvaluated() && 11976 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11977 llvm::APSInt Value = Result.Val.getInt(); 11978 11979 bool Overflowed; 11980 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 11981 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11982 11983 if (Overflowed) { 11984 
S.DiagRuntimeBehavior(E->getExprLoc(), E, 11985 S.PDiag(diag::warn_impcast_fixed_point_range) 11986 << Value.toString(/*Radix=*/10) << T 11987 << E->getSourceRange() 11988 << clang::SourceRange(CC)); 11989 return; 11990 } 11991 } 11992 } 11993 } 11994 11995 // If we are casting an integer type to a floating point type without 11996 // initialization-list syntax, we might lose accuracy if the floating 11997 // point type has a narrower significand than the integer type. 11998 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 11999 TargetBT->isFloatingType() && !IsListInit) { 12000 // Determine the number of precision bits in the source integer type. 12001 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 12002 /*Approximate*/ true); 12003 unsigned int SourcePrecision = SourceRange.Width; 12004 12005 // Determine the number of precision bits in the 12006 // target floating point type. 12007 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 12008 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12009 12010 if (SourcePrecision > 0 && TargetPrecision > 0 && 12011 SourcePrecision > TargetPrecision) { 12012 12013 if (Optional<llvm::APSInt> SourceInt = 12014 E->getIntegerConstantExpr(S.Context)) { 12015 // If the source integer is a constant, convert it to the target 12016 // floating point type. Issue a warning if the value changes 12017 // during the whole conversion. 12018 llvm::APFloat TargetFloatValue( 12019 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12020 llvm::APFloat::opStatus ConversionStatus = 12021 TargetFloatValue.convertFromAPInt( 12022 *SourceInt, SourceBT->isSignedInteger(), 12023 llvm::APFloat::rmNearestTiesToEven); 12024 12025 if (ConversionStatus != llvm::APFloat::opOK) { 12026 std::string PrettySourceValue = SourceInt->toString(10); 12027 SmallString<32> PrettyTargetValue; 12028 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 12029 12030 S.DiagRuntimeBehavior( 12031 E->getExprLoc(), E, 12032 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 12033 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12034 << E->getSourceRange() << clang::SourceRange(CC)); 12035 } 12036 } else { 12037 // Otherwise, the implicit conversion may lose precision. 12038 DiagnoseImpCast(S, E, T, CC, 12039 diag::warn_impcast_integer_float_precision); 12040 } 12041 } 12042 } 12043 12044 DiagnoseNullConversion(S, E, T, CC); 12045 12046 S.DiscardMisalignedMemberAddress(Target, E); 12047 12048 if (Target->isBooleanType()) 12049 DiagnoseIntInBoolContext(S, E); 12050 12051 if (!Source->isIntegerType() || !Target->isIntegerType()) 12052 return; 12053 12054 // TODO: remove this early return once the false positives for constant->bool 12055 // in templates, macros, etc, are reduced or removed. 
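// (For instance, a constant like 'bool b = 0x100;' produced by a macro
// expansion or a dependent template argument would otherwise be reported
// by the constant-range checks below.)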
12056 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 12057 return; 12058 12059 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 12060 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 12061 return adornObjCBoolConversionDiagWithTernaryFixit( 12062 S, E, 12063 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 12064 << E->getType()); 12065 } 12066 12067 IntRange SourceTypeRange = 12068 IntRange::forTargetOfCanonicalType(S.Context, Source); 12069 IntRange LikelySourceRange = 12070 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 12071 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 12072 12073 if (LikelySourceRange.Width > TargetRange.Width) { 12074 // If the source is a constant, use a default-on diagnostic. 12075 // TODO: this should happen for bitfield stores, too. 12076 Expr::EvalResult Result; 12077 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 12078 S.isConstantEvaluated())) { 12079 llvm::APSInt Value(32); 12080 Value = Result.Val.getInt(); 12081 12082 if (S.SourceMgr.isInSystemMacro(CC)) 12083 return; 12084 12085 std::string PrettySourceValue = Value.toString(10); 12086 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12087 12088 S.DiagRuntimeBehavior( 12089 E->getExprLoc(), E, 12090 S.PDiag(diag::warn_impcast_integer_precision_constant) 12091 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12092 << E->getSourceRange() << SourceRange(CC)); 12093 return; 12094 } 12095 12096 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 12097 if (S.SourceMgr.isInSystemMacro(CC)) 12098 return; 12099 12100 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 12101 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 12102 /* pruneControlFlow */ true); 12103 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 12104 } 12105 12106 if (TargetRange.Width > SourceTypeRange.Width) { 12107 if (auto *UO = dyn_cast<UnaryOperator>(E)) 12108 if (UO->getOpcode() == UO_Minus) 12109 if (Source->isUnsignedIntegerType()) { 12110 if (Target->isUnsignedIntegerType()) 12111 return DiagnoseImpCast(S, E, T, CC, 12112 diag::warn_impcast_high_order_zero_bits); 12113 if (Target->isSignedIntegerType()) 12114 return DiagnoseImpCast(S, E, T, CC, 12115 diag::warn_impcast_nonnegative_result); 12116 } 12117 } 12118 12119 if (TargetRange.Width == LikelySourceRange.Width && 12120 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 12121 Source->isSignedIntegerType()) { 12122 // Warn when doing a signed to signed conversion, warn if the positive 12123 // source value is exactly the width of the target type, which will 12124 // cause a negative value to be stored. 12125 12126 Expr::EvalResult Result; 12127 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 12128 !S.SourceMgr.isInSystemMacro(CC)) { 12129 llvm::APSInt Value = Result.Val.getInt(); 12130 if (isSameWidthConstantConversion(S, E, T, CC)) { 12131 std::string PrettySourceValue = Value.toString(10); 12132 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12133 12134 S.DiagRuntimeBehavior( 12135 E->getExprLoc(), E, 12136 S.PDiag(diag::warn_impcast_integer_precision_constant) 12137 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12138 << E->getSourceRange() << SourceRange(CC)); 12139 return; 12140 } 12141 } 12142 12143 // Fall through for non-constants to give a sign conversion warning. 
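// (A constant example: 'short s = 32768;' is reported above as changing
// value to -32768; a non-constant expression of the same width and
// signedness falls through to the signedness warning below.)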
12144 }
12145
12146 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
12147 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
12148 LikelySourceRange.Width == TargetRange.Width)) {
12149 if (S.SourceMgr.isInSystemMacro(CC))
12150 return;
12151
12152 unsigned DiagID = diag::warn_impcast_integer_sign;
12153
12154 // Traditionally, gcc has warned about this under -Wsign-compare.
12155 // We also want to warn about it in -Wconversion.
12156 // So if -Wconversion is off, use a completely identical diagnostic
12157 // in the sign-compare group. The conditional-checking code below uses the
12158 // flag set via ICContext to decide whether further checking is needed.
12159 if (ICContext) {
12160 DiagID = diag::warn_impcast_integer_sign_conditional;
12161 *ICContext = true;
12162 }
12163
12164 return DiagnoseImpCast(S, E, T, CC, DiagID);
12165 }
12166
12167 // Diagnose conversions between different enumeration types.
12168 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
12169 // type, to give us better diagnostics.
12170 QualType SourceType = E->getType();
12171 if (!S.getLangOpts().CPlusPlus) {
12172 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
12173 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
12174 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
12175 SourceType = S.Context.getTypeDeclType(Enum);
12176 Source = S.Context.getCanonicalType(SourceType).getTypePtr();
12177 }
12178 }
12179
12180 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
12181 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
12182 if (SourceEnum->getDecl()->hasNameForLinkage() &&
12183 TargetEnum->getDecl()->hasNameForLinkage() &&
12184 SourceEnum != TargetEnum) {
12185 if (S.SourceMgr.isInSystemMacro(CC))
12186 return;
12187
12188 return DiagnoseImpCast(S, E, SourceType, T, CC,
12189 diag::warn_impcast_different_enum_types);
12190 }
12191 }
12192
12193 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
12194 SourceLocation CC, QualType T);
12195
12196 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
12197 SourceLocation CC, bool &ICContext) {
12198 E = E->IgnoreParenImpCasts();
12199
12200 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
12201 return CheckConditionalOperator(S, CO, CC, T);
12202
12203 AnalyzeImplicitConversions(S, E, CC);
12204 if (E->getType() != T)
12205 return CheckImplicitConversion(S, E, T, CC, &ICContext);
12206 }
12207
12208 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
12209 SourceLocation CC, QualType T) {
12210 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());
12211
12212 Expr *TrueExpr = E->getTrueExpr();
12213 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
12214 TrueExpr = BCO->getCommon();
12215
12216 bool Suspicious = false;
12217 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
12218 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
12219
12220 if (T->isBooleanType())
12221 DiagnoseIntInBoolContext(S, E);
12222
12223 // If -Wconversion would have warned about either of the candidates
12224 // for a signedness conversion to the context type...
12225 if (!Suspicious) return;
12226
12227 // ...but it's currently ignored...
12228 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
12229 return;
12230
12231 // ...then check whether it would have warned about either of the
12232 // candidates for a signedness conversion to the condition type.
12233 if (E->getType() == T) return; 12234 12235 Suspicious = false; 12236 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 12237 E->getType(), CC, &Suspicious); 12238 if (!Suspicious) 12239 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 12240 E->getType(), CC, &Suspicious); 12241 } 12242 12243 /// Check conversion of given expression to boolean. 12244 /// Input argument E is a logical expression. 12245 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 12246 if (S.getLangOpts().Bool) 12247 return; 12248 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 12249 return; 12250 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 12251 } 12252 12253 namespace { 12254 struct AnalyzeImplicitConversionsWorkItem { 12255 Expr *E; 12256 SourceLocation CC; 12257 bool IsListInit; 12258 }; 12259 } 12260 12261 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 12262 /// that should be visited are added to WorkList. 12263 static void AnalyzeImplicitConversions( 12264 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 12265 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 12266 Expr *OrigE = Item.E; 12267 SourceLocation CC = Item.CC; 12268 12269 QualType T = OrigE->getType(); 12270 Expr *E = OrigE->IgnoreParenImpCasts(); 12271 12272 // Propagate whether we are in a C++ list initialization expression. 12273 // If so, we do not issue warnings for implicit int-float conversion 12274 // precision loss, because C++11 narrowing already handles it. 12275 bool IsListInit = Item.IsListInit || 12276 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 12277 12278 if (E->isTypeDependent() || E->isValueDependent()) 12279 return; 12280 12281 Expr *SourceExpr = E; 12282 // Examine, but don't traverse into the source expression of an 12283 // OpaqueValueExpr, since it may have multiple parents and we don't want to 12284 // emit duplicate diagnostics. Its fine to examine the form or attempt to 12285 // evaluate it in the context of checking the specific conversion to T though. 12286 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12287 if (auto *Src = OVE->getSourceExpr()) 12288 SourceExpr = Src; 12289 12290 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 12291 if (UO->getOpcode() == UO_Not && 12292 UO->getSubExpr()->isKnownToHaveBooleanValue()) 12293 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 12294 << OrigE->getSourceRange() << T->isBooleanType() 12295 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 12296 12297 // For conditional operators, we analyze the arguments as if they 12298 // were being fed directly into the output. 12299 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 12300 CheckConditionalOperator(S, CO, CC, T); 12301 return; 12302 } 12303 12304 // Check implicit argument conversions for function calls. 12305 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 12306 CheckImplicitArgumentConversions(S, Call, CC); 12307 12308 // Go ahead and check any implicit conversions we might have skipped. 12309 // The non-canonical typecheck is just an optimization; 12310 // CheckImplicitConversion will filter out dead implicit conversions. 12311 if (SourceExpr->getType() != T) 12312 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 12313 12314 // Now continue drilling into this expression. 
12315 12316 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 12317 // The bound subexpressions in a PseudoObjectExpr are not reachable 12318 // as transitive children. 12319 // FIXME: Use a more uniform representation for this. 12320 for (auto *SE : POE->semantics()) 12321 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 12322 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 12323 } 12324 12325 // Skip past explicit casts. 12326 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 12327 E = CE->getSubExpr()->IgnoreParenImpCasts(); 12328 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 12329 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12330 WorkList.push_back({E, CC, IsListInit}); 12331 return; 12332 } 12333 12334 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12335 // Do a somewhat different check with comparison operators. 12336 if (BO->isComparisonOp()) 12337 return AnalyzeComparison(S, BO); 12338 12339 // And with simple assignments. 12340 if (BO->getOpcode() == BO_Assign) 12341 return AnalyzeAssignment(S, BO); 12342 // And with compound assignments. 12343 if (BO->isAssignmentOp()) 12344 return AnalyzeCompoundAssignment(S, BO); 12345 } 12346 12347 // These break the otherwise-useful invariant below. Fortunately, 12348 // we don't really need to recurse into them, because any internal 12349 // expressions should have been analyzed already when they were 12350 // built into statements. 12351 if (isa<StmtExpr>(E)) return; 12352 12353 // Don't descend into unevaluated contexts. 12354 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 12355 12356 // Now just recurse over the expression's children. 12357 CC = E->getExprLoc(); 12358 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 12359 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 12360 for (Stmt *SubStmt : E->children()) { 12361 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 12362 if (!ChildExpr) 12363 continue; 12364 12365 if (IsLogicalAndOperator && 12366 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 12367 // Ignore checking string literals that are in logical and operators. 12368 // This is a common pattern for asserts. 12369 continue; 12370 WorkList.push_back({ChildExpr, CC, IsListInit}); 12371 } 12372 12373 if (BO && BO->isLogicalOp()) { 12374 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 12375 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12376 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12377 12378 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 12379 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12380 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12381 } 12382 12383 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 12384 if (U->getOpcode() == UO_LNot) { 12385 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 12386 } else if (U->getOpcode() != UO_AddrOf) { 12387 if (U->getSubExpr()->getType()->isAtomicType()) 12388 S.Diag(U->getSubExpr()->getBeginLoc(), 12389 diag::warn_atomic_implicit_seq_cst); 12390 } 12391 } 12392 } 12393 12394 /// AnalyzeImplicitConversions - Find and report any interesting 12395 /// implicit conversions in the given expression. There are a couple 12396 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
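/// For example, assigning a signed 'int' to an 'unsigned int' variable can be
/// reported under -Wsign-conversion (part of -Wconversion), while comparing
/// the two is reported under -Wsign-compare.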
12397 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
12398 bool IsListInit/*= false*/) {
12399 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
12400 WorkList.push_back({OrigE, CC, IsListInit});
12401 while (!WorkList.empty())
12402 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
12403 }
12404
12405 /// Check that the expression has an integer type and diagnose any implicit conversion to it.
12406 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
12407 // Taking into account implicit conversions,
12408 // allow any integer.
12409 if (!E->getType()->isIntegerType()) {
12410 S.Diag(E->getBeginLoc(),
12411 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
12412 return true;
12413 }
12414 // Potentially emit standard warnings for implicit conversions if enabled
12415 // using -Wconversion.
12416 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
12417 return false;
12418 }
12419
12420 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
12421 // Returns true when emitting a warning about taking the address of a reference.
12422 static bool CheckForReference(Sema &SemaRef, const Expr *E,
12423 const PartialDiagnostic &PD) {
12424 E = E->IgnoreParenImpCasts();
12425
12426 const FunctionDecl *FD = nullptr;
12427
12428 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
12429 if (!DRE->getDecl()->getType()->isReferenceType())
12430 return false;
12431 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
12432 if (!M->getMemberDecl()->getType()->isReferenceType())
12433 return false;
12434 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
12435 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
12436 return false;
12437 FD = Call->getDirectCallee();
12438 } else {
12439 return false;
12440 }
12441
12442 SemaRef.Diag(E->getExprLoc(), PD);
12443
12444 // If possible, point to the location of the function.
12445 if (FD) {
12446 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
12447 }
12448
12449 return true;
12450 }
12451
12452 // Returns true if the SourceLocation is expanded from any macro body.
12453 // Returns false if the SourceLocation is invalid, is not from a macro
12454 // expansion, or is expanded from a top-level macro argument.
12455 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
12456 if (Loc.isInvalid())
12457 return false;
12458
12459 while (Loc.isMacroID()) {
12460 if (SM.isMacroBodyExpansion(Loc))
12461 return true;
12462 Loc = SM.getImmediateMacroCallerLoc(Loc);
12463 }
12464
12465 return false;
12466 }
12467
12468 /// Diagnose pointers that are always non-null.
12469 /// \param E the expression containing the pointer
12470 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
12471 /// compared to a null pointer
12472 /// \param IsEqual True when the comparison is equal to a null pointer
12473 /// \param Range Extra SourceRange to highlight in the diagnostic
12474 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
12475 Expr::NullPointerConstantKind NullKind,
12476 bool IsEqual, SourceRange Range) {
12477 if (!E)
12478 return;
12479
12480 // Don't warn inside macros.
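// (For example, a configuration macro whose body compares a global array
// against NULL is always true, but the macro body is not the user's code
// to fix, so it is not worth flagging on every expansion.)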
12481 if (E->getExprLoc().isMacroID()) { 12482 const SourceManager &SM = getSourceManager(); 12483 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 12484 IsInAnyMacroBody(SM, Range.getBegin())) 12485 return; 12486 } 12487 E = E->IgnoreImpCasts(); 12488 12489 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 12490 12491 if (isa<CXXThisExpr>(E)) { 12492 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 12493 : diag::warn_this_bool_conversion; 12494 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 12495 return; 12496 } 12497 12498 bool IsAddressOf = false; 12499 12500 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12501 if (UO->getOpcode() != UO_AddrOf) 12502 return; 12503 IsAddressOf = true; 12504 E = UO->getSubExpr(); 12505 } 12506 12507 if (IsAddressOf) { 12508 unsigned DiagID = IsCompare 12509 ? diag::warn_address_of_reference_null_compare 12510 : diag::warn_address_of_reference_bool_conversion; 12511 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 12512 << IsEqual; 12513 if (CheckForReference(*this, E, PD)) { 12514 return; 12515 } 12516 } 12517 12518 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 12519 bool IsParam = isa<NonNullAttr>(NonnullAttr); 12520 std::string Str; 12521 llvm::raw_string_ostream S(Str); 12522 E->printPretty(S, nullptr, getPrintingPolicy()); 12523 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 12524 : diag::warn_cast_nonnull_to_bool; 12525 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 12526 << E->getSourceRange() << Range << IsEqual; 12527 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 12528 }; 12529 12530 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 12531 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 12532 if (auto *Callee = Call->getDirectCallee()) { 12533 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 12534 ComplainAboutNonnullParamOrCall(A); 12535 return; 12536 } 12537 } 12538 } 12539 12540 // Expect to find a single Decl. Skip anything more complicated. 12541 ValueDecl *D = nullptr; 12542 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 12543 D = R->getDecl(); 12544 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12545 D = M->getMemberDecl(); 12546 } 12547 12548 // Weak Decls can be null. 12549 if (!D || D->isWeak()) 12550 return; 12551 12552 // Check for parameter decl with nonnull attribute 12553 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 12554 if (getCurFunction() && 12555 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 12556 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 12557 ComplainAboutNonnullParamOrCall(A); 12558 return; 12559 } 12560 12561 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 12562 // Skip function template not specialized yet. 
12563 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
12564 return;
12565 auto ParamIter = llvm::find(FD->parameters(), PV);
12566 assert(ParamIter != FD->param_end());
12567 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
12568
12569 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
12570 if (!NonNull->args_size()) {
12571 ComplainAboutNonnullParamOrCall(NonNull);
12572 return;
12573 }
12574
12575 for (const ParamIdx &ArgNo : NonNull->args()) {
12576 if (ArgNo.getASTIndex() == ParamNo) {
12577 ComplainAboutNonnullParamOrCall(NonNull);
12578 return;
12579 }
12580 }
12581 }
12582 }
12583 }
12584 }
12585
12586 QualType T = D->getType();
12587 const bool IsArray = T->isArrayType();
12588 const bool IsFunction = T->isFunctionType();
12589
12590 // Address of function is used to silence the function warning.
12591 if (IsAddressOf && IsFunction) {
12592 return;
12593 }
12594
12595 // Found nothing.
12596 if (!IsAddressOf && !IsFunction && !IsArray)
12597 return;
12598
12599 // Pretty print the expression for the diagnostic.
12600 std::string Str;
12601 llvm::raw_string_ostream S(Str);
12602 E->printPretty(S, nullptr, getPrintingPolicy());
12603
12604 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
12605 : diag::warn_impcast_pointer_to_bool;
12606 enum {
12607 AddressOf,
12608 FunctionPointer,
12609 ArrayPointer
12610 } DiagType;
12611 if (IsAddressOf)
12612 DiagType = AddressOf;
12613 else if (IsFunction)
12614 DiagType = FunctionPointer;
12615 else if (IsArray)
12616 DiagType = ArrayPointer;
12617 else
12618 llvm_unreachable("Could not determine diagnostic.");
12619 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
12620 << Range << IsEqual;
12621
12622 if (!IsFunction)
12623 return;
12624
12625 // Suggest '&' to silence the function warning.
12626 Diag(E->getExprLoc(), diag::note_function_warning_silence)
12627 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
12628
12629 // Check to see if '()' fixit should be emitted.
12630 QualType ReturnType;
12631 UnresolvedSet<4> NonTemplateOverloads;
12632 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
12633 if (ReturnType.isNull())
12634 return;
12635
12636 if (IsCompare) {
12637 // There are two cases here. For a null pointer constant like 'nullptr',
12638 // only suggest the fixit for a pointer return type. If the null is a
12639 // literal 0, suggest it for either a pointer or an integer return type.
12640 if (!ReturnType->isPointerType()) {
12641 if (NullKind == Expr::NPCK_ZeroExpression ||
12642 NullKind == Expr::NPCK_ZeroLiteral) {
12643 if (!ReturnType->isIntegerType())
12644 return;
12645 } else {
12646 return;
12647 }
12648 }
12649 } else { // !IsCompare
12650 // For a function-to-bool conversion, only suggest the fixit if the
12651 // function has a bool return type.
12652 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
12653 return;
12654 }
12655 Diag(E->getExprLoc(), diag::note_function_to_function_call)
12656 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
12657 }
12658
12659 /// Diagnoses "dangerous" implicit conversions within the given
12660 /// expression (which is a full expression). Implements -Wconversion
12661 /// and -Wsign-compare.
12662 ///
12663 /// \param CC the "context" location of the implicit conversion, i.e.
12664 /// the location of the syntactic entity requiring the implicit
12665 /// conversion
12666 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
12667 // Don't diagnose in unevaluated contexts.
12668 if (isUnevaluatedContext()) 12669 return; 12670 12671 // Don't diagnose for value- or type-dependent expressions. 12672 if (E->isTypeDependent() || E->isValueDependent()) 12673 return; 12674 12675 // Check for array bounds violations in cases where the check isn't triggered 12676 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 12677 // ArraySubscriptExpr is on the RHS of a variable initialization. 12678 CheckArrayAccess(E); 12679 12680 // This is not the right CC for (e.g.) a variable initialization. 12681 AnalyzeImplicitConversions(*this, E, CC); 12682 } 12683 12684 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 12685 /// Input argument E is a logical expression. 12686 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 12687 ::CheckBoolLikeConversion(*this, E, CC); 12688 } 12689 12690 /// Diagnose when expression is an integer constant expression and its evaluation 12691 /// results in integer overflow 12692 void Sema::CheckForIntOverflow (Expr *E) { 12693 // Use a work list to deal with nested struct initializers. 12694 SmallVector<Expr *, 2> Exprs(1, E); 12695 12696 do { 12697 Expr *OriginalE = Exprs.pop_back_val(); 12698 Expr *E = OriginalE->IgnoreParenCasts(); 12699 12700 if (isa<BinaryOperator>(E)) { 12701 E->EvaluateForOverflow(Context); 12702 continue; 12703 } 12704 12705 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 12706 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 12707 else if (isa<ObjCBoxedExpr>(OriginalE)) 12708 E->EvaluateForOverflow(Context); 12709 else if (auto Call = dyn_cast<CallExpr>(E)) 12710 Exprs.append(Call->arg_begin(), Call->arg_end()); 12711 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 12712 Exprs.append(Message->arg_begin(), Message->arg_end()); 12713 } while (!Exprs.empty()); 12714 } 12715 12716 namespace { 12717 12718 /// Visitor for expressions which looks for unsequenced operations on the 12719 /// same object. 12720 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 12721 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 12722 12723 /// A tree of sequenced regions within an expression. Two regions are 12724 /// unsequenced if one is an ancestor or a descendent of the other. When we 12725 /// finish processing an expression with sequencing, such as a comma 12726 /// expression, we fold its tree nodes into its parent, since they are 12727 /// unsequenced with respect to nodes we will visit later. 12728 class SequenceTree { 12729 struct Value { 12730 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12731 unsigned Parent : 31; 12732 unsigned Merged : 1; 12733 }; 12734 SmallVector<Value, 8> Values; 12735 12736 public: 12737 /// A region within an expression which may be sequenced with respect 12738 /// to some other region. 12739 class Seq { 12740 friend class SequenceTree; 12741 12742 unsigned Index; 12743 12744 explicit Seq(unsigned N) : Index(N) {} 12745 12746 public: 12747 Seq() : Index(0) {} 12748 }; 12749 12750 SequenceTree() { Values.push_back(Value(0)); } 12751 Seq root() const { return Seq(0); } 12752 12753 /// Create a new sequence of operations, which is an unsequenced 12754 /// subset of \p Parent. This sequence of operations is sequenced with 12755 /// respect to other children of \p Parent. 12756 Seq allocate(Seq Parent) { 12757 Values.push_back(Value(Parent.Index)); 12758 return Seq(Values.size() - 1); 12759 } 12760 12761 /// Merge a sequence of operations into its parent. 
12762 void merge(Seq S) { 12763 Values[S.Index].Merged = true; 12764 } 12765 12766 /// Determine whether two operations are unsequenced. This operation 12767 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 12768 /// should have been merged into its parent as appropriate. 12769 bool isUnsequenced(Seq Cur, Seq Old) { 12770 unsigned C = representative(Cur.Index); 12771 unsigned Target = representative(Old.Index); 12772 while (C >= Target) { 12773 if (C == Target) 12774 return true; 12775 C = Values[C].Parent; 12776 } 12777 return false; 12778 } 12779 12780 private: 12781 /// Pick a representative for a sequence. 12782 unsigned representative(unsigned K) { 12783 if (Values[K].Merged) 12784 // Perform path compression as we go. 12785 return Values[K].Parent = representative(Values[K].Parent); 12786 return K; 12787 } 12788 }; 12789 12790 /// An object for which we can track unsequenced uses. 12791 using Object = const NamedDecl *; 12792 12793 /// Different flavors of object usage which we track. We only track the 12794 /// least-sequenced usage of each kind. 12795 enum UsageKind { 12796 /// A read of an object. Multiple unsequenced reads are OK. 12797 UK_Use, 12798 12799 /// A modification of an object which is sequenced before the value 12800 /// computation of the expression, such as ++n in C++. 12801 UK_ModAsValue, 12802 12803 /// A modification of an object which is not sequenced before the value 12804 /// computation of the expression, such as n++. 12805 UK_ModAsSideEffect, 12806 12807 UK_Count = UK_ModAsSideEffect + 1 12808 }; 12809 12810 /// Bundle together a sequencing region and the expression corresponding 12811 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 12812 struct Usage { 12813 const Expr *UsageExpr; 12814 SequenceTree::Seq Seq; 12815 12816 Usage() : UsageExpr(nullptr), Seq() {} 12817 }; 12818 12819 struct UsageInfo { 12820 Usage Uses[UK_Count]; 12821 12822 /// Have we issued a diagnostic for this object already? 12823 bool Diagnosed; 12824 12825 UsageInfo() : Uses(), Diagnosed(false) {} 12826 }; 12827 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12828 12829 Sema &SemaRef; 12830 12831 /// Sequenced regions within the expression. 12832 SequenceTree Tree; 12833 12834 /// Declaration modifications and references which we have seen. 12835 UsageInfoMap UsageMap; 12836 12837 /// The region we are currently within. 12838 SequenceTree::Seq Region; 12839 12840 /// Filled in with declarations which were modified as a side-effect 12841 /// (that is, post-increment operations). 12842 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12843 12844 /// Expressions to check later. We defer checking these to reduce 12845 /// stack usage. 12846 SmallVectorImpl<const Expr *> &WorkList; 12847 12848 /// RAII object wrapping the visitation of a sequenced subexpression of an 12849 /// expression. At the end of this process, the side-effects of the evaluation 12850 /// become sequenced with respect to the value computation of the result, so 12851 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12852 /// UK_ModAsValue. 
12853 struct SequencedSubexpression { 12854 SequencedSubexpression(SequenceChecker &Self) 12855 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12856 Self.ModAsSideEffect = &ModAsSideEffect; 12857 } 12858 12859 ~SequencedSubexpression() { 12860 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 12861 // Add a new usage with usage kind UK_ModAsValue, and then restore 12862 // the previous usage with UK_ModAsSideEffect (thus clearing it if 12863 // the previous one was empty). 12864 UsageInfo &UI = Self.UsageMap[M.first]; 12865 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 12866 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 12867 SideEffectUsage = M.second; 12868 } 12869 Self.ModAsSideEffect = OldModAsSideEffect; 12870 } 12871 12872 SequenceChecker &Self; 12873 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12874 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12875 }; 12876 12877 /// RAII object wrapping the visitation of a subexpression which we might 12878 /// choose to evaluate as a constant. If any subexpression is evaluated and 12879 /// found to be non-constant, this allows us to suppress the evaluation of 12880 /// the outer expression. 12881 class EvaluationTracker { 12882 public: 12883 EvaluationTracker(SequenceChecker &Self) 12884 : Self(Self), Prev(Self.EvalTracker) { 12885 Self.EvalTracker = this; 12886 } 12887 12888 ~EvaluationTracker() { 12889 Self.EvalTracker = Prev; 12890 if (Prev) 12891 Prev->EvalOK &= EvalOK; 12892 } 12893 12894 bool evaluate(const Expr *E, bool &Result) { 12895 if (!EvalOK || E->isValueDependent()) 12896 return false; 12897 EvalOK = E->EvaluateAsBooleanCondition( 12898 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12899 return EvalOK; 12900 } 12901 12902 private: 12903 SequenceChecker &Self; 12904 EvaluationTracker *Prev; 12905 bool EvalOK = true; 12906 } *EvalTracker = nullptr; 12907 12908 /// Find the object which is produced by the specified expression, 12909 /// if any. 12910 Object getObject(const Expr *E, bool Mod) const { 12911 E = E->IgnoreParenCasts(); 12912 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12913 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12914 return getObject(UO->getSubExpr(), Mod); 12915 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12916 if (BO->getOpcode() == BO_Comma) 12917 return getObject(BO->getRHS(), Mod); 12918 if (Mod && BO->isAssignmentOp()) 12919 return getObject(BO->getLHS(), Mod); 12920 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12921 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12922 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12923 return ME->getMemberDecl(); 12924 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12925 // FIXME: If this is a reference, map through to its value. 12926 return DRE->getDecl(); 12927 return nullptr; 12928 } 12929 12930 /// Note that an object \p O was modified or used by an expression 12931 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 12932 /// the object \p O as obtained via the \p UsageMap. 12933 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 12934 // Get the old usage for the given object and usage kind. 
12935 Usage &U = UI.Uses[UK];
12936 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
12937 // If we have a modification as side effect and are in a sequenced
12938 // subexpression, save the old Usage so that we can restore it later
12939 // in SequencedSubexpression::~SequencedSubexpression.
12940 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
12941 ModAsSideEffect->push_back(std::make_pair(O, U));
12942 // Then record the new usage with the current sequencing region.
12943 U.UsageExpr = UsageExpr;
12944 U.Seq = Region;
12945 }
12946 }
12947
12948 /// Check whether a modification or use of an object \p O in an expression
12949 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
12950 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
12951 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
12952 /// usage and false when we are checking for a mod-use unsequenced usage.
12953 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
12954 UsageKind OtherKind, bool IsModMod) {
12955 if (UI.Diagnosed)
12956 return;
12957
12958 const Usage &U = UI.Uses[OtherKind];
12959 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
12960 return;
12961
12962 const Expr *Mod = U.UsageExpr;
12963 const Expr *ModOrUse = UsageExpr;
12964 if (OtherKind == UK_Use)
12965 std::swap(Mod, ModOrUse);
12966
12967 SemaRef.DiagRuntimeBehavior(
12968 Mod->getExprLoc(), {Mod, ModOrUse},
12969 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
12970 : diag::warn_unsequenced_mod_use)
12971 << O << SourceRange(ModOrUse->getExprLoc()));
12972 UI.Diagnosed = true;
12973 }
12974
12975 // A note on note{Pre, Post}{Use, Mod}:
12976 //
12977 // (It helps to follow the algorithm with an expression such as
12978 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
12979 // operations before C++17 and both are well-defined in C++17.)
12980 //
12981 // When visiting a node which uses/modifies an object, we first call
12982 // notePreUse or notePreMod before visiting its sub-expression(s). At this
12983 // point the children of the current node have not yet been visited, so the
12984 // eventual uses/modifications resulting from the children of the current
12985 // node have not been recorded yet.
12986 //
12987 // We then visit the children of the current node. After that notePostUse or
12988 // notePostMod is called. These will 1) detect an unsequenced modification
12989 // as side effect (as in "k++ + k") and 2) add a new usage with the
12990 // appropriate usage kind.
12991 //
12992 // We also have to be careful because some operations sequence the side-effect
12993 // modifications of their operands (for example: || or ,). To account for this
12994 // we wrap the visitation of such a sub-expression (for example: the LHS of
12995 // || or ,) with SequencedSubexpression. SequencedSubexpression is an RAII
12996 // object which records usages which are modifications as side effects, and
12997 // then downgrades them (or more accurately restores the previous usage which
12998 // was a modification as side effect) when exiting the scope of the sequenced
12999 // subexpression.
13000
13001 void notePreUse(Object O, const Expr *UseExpr) {
13002 UsageInfo &UI = UsageMap[O];
13003 // Uses conflict with other modifications.
13004 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 13005 } 13006 13007 void notePostUse(Object O, const Expr *UseExpr) { 13008 UsageInfo &UI = UsageMap[O]; 13009 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 13010 /*IsModMod=*/false); 13011 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 13012 } 13013 13014 void notePreMod(Object O, const Expr *ModExpr) { 13015 UsageInfo &UI = UsageMap[O]; 13016 // Modifications conflict with other modifications and with uses. 13017 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 13018 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 13019 } 13020 13021 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 13022 UsageInfo &UI = UsageMap[O]; 13023 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 13024 /*IsModMod=*/true); 13025 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 13026 } 13027 13028 public: 13029 SequenceChecker(Sema &S, const Expr *E, 13030 SmallVectorImpl<const Expr *> &WorkList) 13031 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 13032 Visit(E); 13033 // Silence a -Wunused-private-field since WorkList is now unused. 13034 // TODO: Evaluate if it can be used, and if not remove it. 13035 (void)this->WorkList; 13036 } 13037 13038 void VisitStmt(const Stmt *S) { 13039 // Skip all statements which aren't expressions for now. 13040 } 13041 13042 void VisitExpr(const Expr *E) { 13043 // By default, just recurse to evaluated subexpressions. 13044 Base::VisitStmt(E); 13045 } 13046 13047 void VisitCastExpr(const CastExpr *E) { 13048 Object O = Object(); 13049 if (E->getCastKind() == CK_LValueToRValue) 13050 O = getObject(E->getSubExpr(), false); 13051 13052 if (O) 13053 notePreUse(O, E); 13054 VisitExpr(E); 13055 if (O) 13056 notePostUse(O, E); 13057 } 13058 13059 void VisitSequencedExpressions(const Expr *SequencedBefore, 13060 const Expr *SequencedAfter) { 13061 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 13062 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 13063 SequenceTree::Seq OldRegion = Region; 13064 13065 { 13066 SequencedSubexpression SeqBefore(*this); 13067 Region = BeforeRegion; 13068 Visit(SequencedBefore); 13069 } 13070 13071 Region = AfterRegion; 13072 Visit(SequencedAfter); 13073 13074 Region = OldRegion; 13075 13076 Tree.merge(BeforeRegion); 13077 Tree.merge(AfterRegion); 13078 } 13079 13080 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 13081 // C++17 [expr.sub]p1: 13082 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 13083 // expression E1 is sequenced before the expression E2. 13084 if (SemaRef.getLangOpts().CPlusPlus17) 13085 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 13086 else { 13087 Visit(ASE->getLHS()); 13088 Visit(ASE->getRHS()); 13089 } 13090 } 13091 13092 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13093 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13094 void VisitBinPtrMem(const BinaryOperator *BO) { 13095 // C++17 [expr.mptr.oper]p4: 13096 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 13097 // the expression E1 is sequenced before the expression E2. 
13098 if (SemaRef.getLangOpts().CPlusPlus17) 13099 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13100 else { 13101 Visit(BO->getLHS()); 13102 Visit(BO->getRHS()); 13103 } 13104 } 13105 13106 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13107 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13108 void VisitBinShlShr(const BinaryOperator *BO) { 13109 // C++17 [expr.shift]p4: 13110 // The expression E1 is sequenced before the expression E2. 13111 if (SemaRef.getLangOpts().CPlusPlus17) 13112 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13113 else { 13114 Visit(BO->getLHS()); 13115 Visit(BO->getRHS()); 13116 } 13117 } 13118 13119 void VisitBinComma(const BinaryOperator *BO) { 13120 // C++11 [expr.comma]p1: 13121 // Every value computation and side effect associated with the left 13122 // expression is sequenced before every value computation and side 13123 // effect associated with the right expression. 13124 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13125 } 13126 13127 void VisitBinAssign(const BinaryOperator *BO) { 13128 SequenceTree::Seq RHSRegion; 13129 SequenceTree::Seq LHSRegion; 13130 if (SemaRef.getLangOpts().CPlusPlus17) { 13131 RHSRegion = Tree.allocate(Region); 13132 LHSRegion = Tree.allocate(Region); 13133 } else { 13134 RHSRegion = Region; 13135 LHSRegion = Region; 13136 } 13137 SequenceTree::Seq OldRegion = Region; 13138 13139 // C++11 [expr.ass]p1: 13140 // [...] the assignment is sequenced after the value computation 13141 // of the right and left operands, [...] 13142 // 13143 // so check it before inspecting the operands and update the 13144 // map afterwards. 13145 Object O = getObject(BO->getLHS(), /*Mod=*/true); 13146 if (O) 13147 notePreMod(O, BO); 13148 13149 if (SemaRef.getLangOpts().CPlusPlus17) { 13150 // C++17 [expr.ass]p1: 13151 // [...] The right operand is sequenced before the left operand. [...] 13152 { 13153 SequencedSubexpression SeqBefore(*this); 13154 Region = RHSRegion; 13155 Visit(BO->getRHS()); 13156 } 13157 13158 Region = LHSRegion; 13159 Visit(BO->getLHS()); 13160 13161 if (O && isa<CompoundAssignOperator>(BO)) 13162 notePostUse(O, BO); 13163 13164 } else { 13165 // C++11 does not specify any sequencing between the LHS and RHS. 13166 Region = LHSRegion; 13167 Visit(BO->getLHS()); 13168 13169 if (O && isa<CompoundAssignOperator>(BO)) 13170 notePostUse(O, BO); 13171 13172 Region = RHSRegion; 13173 Visit(BO->getRHS()); 13174 } 13175 13176 // C++11 [expr.ass]p1: 13177 // the assignment is sequenced [...] before the value computation of the 13178 // assignment expression. 13179 // C11 6.5.16/3 has no such rule. 13180 Region = OldRegion; 13181 if (O) 13182 notePostMod(O, BO, 13183 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 13184 : UK_ModAsSideEffect); 13185 if (SemaRef.getLangOpts().CPlusPlus17) { 13186 Tree.merge(RHSRegion); 13187 Tree.merge(LHSRegion); 13188 } 13189 } 13190 13191 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 13192 VisitBinAssign(CAO); 13193 } 13194 13195 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13196 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13197 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 13198 Object O = getObject(UO->getSubExpr(), true); 13199 if (!O) 13200 return VisitExpr(UO); 13201 13202 notePreMod(O, UO); 13203 Visit(UO->getSubExpr()); 13204 // C++11 [expr.pre.incr]p1: 13205 // the expression ++x is equivalent to x+=1 13206 notePostMod(O, UO, 13207 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 13208 : UK_ModAsSideEffect); 13209 } 13210 13211 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13212 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13213 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 13214 Object O = getObject(UO->getSubExpr(), true); 13215 if (!O) 13216 return VisitExpr(UO); 13217 13218 notePreMod(O, UO); 13219 Visit(UO->getSubExpr()); 13220 notePostMod(O, UO, UK_ModAsSideEffect); 13221 } 13222 13223 void VisitBinLOr(const BinaryOperator *BO) { 13224 // C++11 [expr.log.or]p2: 13225 // If the second expression is evaluated, every value computation and 13226 // side effect associated with the first expression is sequenced before 13227 // every value computation and side effect associated with the 13228 // second expression. 13229 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13230 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13231 SequenceTree::Seq OldRegion = Region; 13232 13233 EvaluationTracker Eval(*this); 13234 { 13235 SequencedSubexpression Sequenced(*this); 13236 Region = LHSRegion; 13237 Visit(BO->getLHS()); 13238 } 13239 13240 // C++11 [expr.log.or]p1: 13241 // [...] the second operand is not evaluated if the first operand 13242 // evaluates to true. 13243 bool EvalResult = false; 13244 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 13245 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 13246 if (ShouldVisitRHS) { 13247 Region = RHSRegion; 13248 Visit(BO->getRHS()); 13249 } 13250 13251 Region = OldRegion; 13252 Tree.merge(LHSRegion); 13253 Tree.merge(RHSRegion); 13254 } 13255 13256 void VisitBinLAnd(const BinaryOperator *BO) { 13257 // C++11 [expr.log.and]p2: 13258 // If the second expression is evaluated, every value computation and 13259 // side effect associated with the first expression is sequenced before 13260 // every value computation and side effect associated with the 13261 // second expression. 13262 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13263 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13264 SequenceTree::Seq OldRegion = Region; 13265 13266 EvaluationTracker Eval(*this); 13267 { 13268 SequencedSubexpression Sequenced(*this); 13269 Region = LHSRegion; 13270 Visit(BO->getLHS()); 13271 } 13272 13273 // C++11 [expr.log.and]p1: 13274 // [...] the second operand is not evaluated if the first operand is false. 
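// (so no -Wunsequenced diagnostic is issued for something like
// '0 && (i++ + i++)', whose right-hand side can never run)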
13275 bool EvalResult = false;
13276 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
13277 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
13278 if (ShouldVisitRHS) {
13279 Region = RHSRegion;
13280 Visit(BO->getRHS());
13281 }
13282
13283 Region = OldRegion;
13284 Tree.merge(LHSRegion);
13285 Tree.merge(RHSRegion);
13286 }
13287
13288 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
13289 // C++11 [expr.cond]p1:
13290 // [...] Every value computation and side effect associated with the first
13291 // expression is sequenced before every value computation and side effect
13292 // associated with the second or third expression.
13293 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
13294
13295 // No sequencing is specified between the true and false expressions.
13296 // However, since exactly one of the two is going to be evaluated we can
13297 // consider them to be sequenced. This is needed to avoid warning on
13298 // something like "x ? y += 1 : y += 2;" in the case where we will visit
13299 // both the true and false expressions because we can't evaluate x.
13300 // This will still allow us to detect an expression like (pre C++17)
13301 // "(x ? y += 1 : y += 2) = y".
13302 //
13303 // We don't wrap the visitation of the true and false expressions with
13304 // SequencedSubexpression because we don't want to downgrade modifications
13305 // as side effects in the true and false expressions after the visitation
13306 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
13307 // not warn between the two "y++", but we should warn between the "y++"
13308 // and the "y".)
13309 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
13310 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
13311 SequenceTree::Seq OldRegion = Region;
13312
13313 EvaluationTracker Eval(*this);
13314 {
13315 SequencedSubexpression Sequenced(*this);
13316 Region = ConditionRegion;
13317 Visit(CO->getCond());
13318 }
13319
13320 // C++11 [expr.cond]p1:
13321 // [...] The first expression is contextually converted to bool (Clause 4).
13322 // It is evaluated and if it is true, the result of the conditional
13323 // expression is the value of the second expression, otherwise that of the
13324 // third expression. Only one of the second and third expressions is
13325 // evaluated. [...]
13326 bool EvalResult = false;
13327 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
13328 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
13329 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
13330 if (ShouldVisitTrueExpr) {
13331 Region = TrueRegion;
13332 Visit(CO->getTrueExpr());
13333 }
13334 if (ShouldVisitFalseExpr) {
13335 Region = FalseRegion;
13336 Visit(CO->getFalseExpr());
13337 }
13338
13339 Region = OldRegion;
13340 Tree.merge(ConditionRegion);
13341 Tree.merge(TrueRegion);
13342 Tree.merge(FalseRegion);
13343 }
13344
13345 void VisitCallExpr(const CallExpr *CE) {
13346 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
13347
13348 if (CE->isUnevaluatedBuiltinCall(Context))
13349 return;
13350
13351 // C++11 [intro.execution]p15:
13352 // When calling a function [...], every value computation and side effect
13353 // associated with any argument expression, or with the postfix expression
13354 // designating the called function, is sequenced before execution of every
13355 // expression or statement in the body of the function [and thus before
13356 // the value computation of its result].
13357 SequencedSubexpression Sequenced(*this); 13358 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 13359 // C++17 [expr.call]p5 13360 // The postfix-expression is sequenced before each expression in the 13361 // expression-list and any default argument. [...] 13362 SequenceTree::Seq CalleeRegion; 13363 SequenceTree::Seq OtherRegion; 13364 if (SemaRef.getLangOpts().CPlusPlus17) { 13365 CalleeRegion = Tree.allocate(Region); 13366 OtherRegion = Tree.allocate(Region); 13367 } else { 13368 CalleeRegion = Region; 13369 OtherRegion = Region; 13370 } 13371 SequenceTree::Seq OldRegion = Region; 13372 13373 // Visit the callee expression first. 13374 Region = CalleeRegion; 13375 if (SemaRef.getLangOpts().CPlusPlus17) { 13376 SequencedSubexpression Sequenced(*this); 13377 Visit(CE->getCallee()); 13378 } else { 13379 Visit(CE->getCallee()); 13380 } 13381 13382 // Then visit the argument expressions. 13383 Region = OtherRegion; 13384 for (const Expr *Argument : CE->arguments()) 13385 Visit(Argument); 13386 13387 Region = OldRegion; 13388 if (SemaRef.getLangOpts().CPlusPlus17) { 13389 Tree.merge(CalleeRegion); 13390 Tree.merge(OtherRegion); 13391 } 13392 }); 13393 } 13394 13395 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 13396 // C++17 [over.match.oper]p2: 13397 // [...] the operator notation is first transformed to the equivalent 13398 // function-call notation as summarized in Table 12 (where @ denotes one 13399 // of the operators covered in the specified subclause). However, the 13400 // operands are sequenced in the order prescribed for the built-in 13401 // operator (Clause 8). 13402 // 13403 // From the above only overloaded binary operators and overloaded call 13404 // operators have sequencing rules in C++17 that we need to handle 13405 // separately. 13406 if (!SemaRef.getLangOpts().CPlusPlus17 || 13407 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 13408 return VisitCallExpr(CXXOCE); 13409 13410 enum { 13411 NoSequencing, 13412 LHSBeforeRHS, 13413 RHSBeforeLHS, 13414 LHSBeforeRest 13415 } SequencingKind; 13416 switch (CXXOCE->getOperator()) { 13417 case OO_Equal: 13418 case OO_PlusEqual: 13419 case OO_MinusEqual: 13420 case OO_StarEqual: 13421 case OO_SlashEqual: 13422 case OO_PercentEqual: 13423 case OO_CaretEqual: 13424 case OO_AmpEqual: 13425 case OO_PipeEqual: 13426 case OO_LessLessEqual: 13427 case OO_GreaterGreaterEqual: 13428 SequencingKind = RHSBeforeLHS; 13429 break; 13430 13431 case OO_LessLess: 13432 case OO_GreaterGreater: 13433 case OO_AmpAmp: 13434 case OO_PipePipe: 13435 case OO_Comma: 13436 case OO_ArrowStar: 13437 case OO_Subscript: 13438 SequencingKind = LHSBeforeRHS; 13439 break; 13440 13441 case OO_Call: 13442 SequencingKind = LHSBeforeRest; 13443 break; 13444 13445 default: 13446 SequencingKind = NoSequencing; 13447 break; 13448 } 13449 13450 if (SequencingKind == NoSequencing) 13451 return VisitCallExpr(CXXOCE); 13452 13453 // This is a call, so all subexpressions are sequenced before the result. 
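// For example, with an overloaded operator<<, "out << i << i++" is
// well-defined in C++17 because the left-hand operand is sequenced before
// the right-hand one; in earlier language modes we already deferred to
// VisitCallExpr above.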
13454 SequencedSubexpression Sequenced(*this); 13455 13456 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 13457 assert(SemaRef.getLangOpts().CPlusPlus17 && 13458 "Should only get there with C++17 and above!"); 13459 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 13460 "Should only get there with an overloaded binary operator" 13461 " or an overloaded call operator!"); 13462 13463 if (SequencingKind == LHSBeforeRest) { 13464 assert(CXXOCE->getOperator() == OO_Call && 13465 "We should only have an overloaded call operator here!"); 13466 13467 // This is very similar to VisitCallExpr, except that we only have the 13468 // C++17 case. The postfix-expression is the first argument of the 13469 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 13470 // are in the following arguments. 13471 // 13472 // Note that we intentionally do not visit the callee expression since 13473 // it is just a decayed reference to a function. 13474 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 13475 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 13476 SequenceTree::Seq OldRegion = Region; 13477 13478 assert(CXXOCE->getNumArgs() >= 1 && 13479 "An overloaded call operator must have at least one argument" 13480 " for the postfix-expression!"); 13481 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 13482 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 13483 CXXOCE->getNumArgs() - 1); 13484 13485 // Visit the postfix-expression first. 13486 { 13487 Region = PostfixExprRegion; 13488 SequencedSubexpression Sequenced(*this); 13489 Visit(PostfixExpr); 13490 } 13491 13492 // Then visit the argument expressions. 13493 Region = ArgsRegion; 13494 for (const Expr *Arg : Args) 13495 Visit(Arg); 13496 13497 Region = OldRegion; 13498 Tree.merge(PostfixExprRegion); 13499 Tree.merge(ArgsRegion); 13500 } else { 13501 assert(CXXOCE->getNumArgs() == 2 && 13502 "Should only have two arguments here!"); 13503 assert((SequencingKind == LHSBeforeRHS || 13504 SequencingKind == RHSBeforeLHS) && 13505 "Unexpected sequencing kind!"); 13506 13507 // We do not visit the callee expression since it is just a decayed 13508 // reference to a function. 13509 const Expr *E1 = CXXOCE->getArg(0); 13510 const Expr *E2 = CXXOCE->getArg(1); 13511 if (SequencingKind == RHSBeforeLHS) 13512 std::swap(E1, E2); 13513 13514 return VisitSequencedExpressions(E1, E2); 13515 } 13516 }); 13517 } 13518 13519 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 13520 // This is a call, so all subexpressions are sequenced before the result. 13521 SequencedSubexpression Sequenced(*this); 13522 13523 if (!CCE->isListInitialization()) 13524 return VisitExpr(CCE); 13525 13526 // In C++11, list initializations are sequenced. 13527 SmallVector<SequenceTree::Seq, 32> Elts; 13528 SequenceTree::Seq Parent = Region; 13529 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 13530 E = CCE->arg_end(); 13531 I != E; ++I) { 13532 Region = Tree.allocate(Parent); 13533 Elts.push_back(Region); 13534 Visit(*I); 13535 } 13536 13537 // Forget that the initializers are sequenced. 13538 Region = Parent; 13539 for (unsigned I = 0; I < Elts.size(); ++I) 13540 Tree.merge(Elts[I]); 13541 } 13542 13543 void VisitInitListExpr(const InitListExpr *ILE) { 13544 if (!SemaRef.getLangOpts().CPlusPlus11) 13545 return VisitExpr(ILE); 13546 13547 // In C++11, list initializations are sequenced. 
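// For example, "int a[] = {i++, i++};" is well-formed in C++11: each
// initializer-clause is sequenced before the ones that follow it, so no
// -Wunsequenced warning should be issued here.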
13548 SmallVector<SequenceTree::Seq, 32> Elts; 13549 SequenceTree::Seq Parent = Region; 13550 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 13551 const Expr *E = ILE->getInit(I); 13552 if (!E) 13553 continue; 13554 Region = Tree.allocate(Parent); 13555 Elts.push_back(Region); 13556 Visit(E); 13557 } 13558 13559 // Forget that the initializers are sequenced. 13560 Region = Parent; 13561 for (unsigned I = 0; I < Elts.size(); ++I) 13562 Tree.merge(Elts[I]); 13563 } 13564 }; 13565 13566 } // namespace 13567 13568 void Sema::CheckUnsequencedOperations(const Expr *E) { 13569 SmallVector<const Expr *, 8> WorkList; 13570 WorkList.push_back(E); 13571 while (!WorkList.empty()) { 13572 const Expr *Item = WorkList.pop_back_val(); 13573 SequenceChecker(*this, Item, WorkList); 13574 } 13575 } 13576 13577 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 13578 bool IsConstexpr) { 13579 llvm::SaveAndRestore<bool> ConstantContext( 13580 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 13581 CheckImplicitConversions(E, CheckLoc); 13582 if (!E->isInstantiationDependent()) 13583 CheckUnsequencedOperations(E); 13584 if (!IsConstexpr && !E->isValueDependent()) 13585 CheckForIntOverflow(E); 13586 DiagnoseMisalignedMembers(); 13587 } 13588 13589 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 13590 FieldDecl *BitField, 13591 Expr *Init) { 13592 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 13593 } 13594 13595 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 13596 SourceLocation Loc) { 13597 if (!PType->isVariablyModifiedType()) 13598 return; 13599 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 13600 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 13601 return; 13602 } 13603 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 13604 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 13605 return; 13606 } 13607 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 13608 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 13609 return; 13610 } 13611 13612 const ArrayType *AT = S.Context.getAsArrayType(PType); 13613 if (!AT) 13614 return; 13615 13616 if (AT->getSizeModifier() != ArrayType::Star) { 13617 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 13618 return; 13619 } 13620 13621 S.Diag(Loc, diag::err_array_star_in_function_definition); 13622 } 13623 13624 /// CheckParmsForFunctionDef - Check that the parameters of the given 13625 /// function are appropriate for the definition of a function. This 13626 /// takes care of any checks that cannot be performed on the 13627 /// declaration itself, e.g., that the types of each of the function 13628 /// parameters are complete. 13629 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 13630 bool CheckParameterNames) { 13631 bool HasInvalidParm = false; 13632 for (ParmVarDecl *Param : Parameters) { 13633 // C99 6.7.5.3p4: the parameters in a parameter type list in a 13634 // function declarator that is part of a function definition of 13635 // that function shall not have incomplete type. 13636 // 13637 // This is also C++ [dcl.fct]p6. 
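// For example, "struct S; void f(struct S s) {}" is ill-formed because S
// is required to be complete at the point of the definition.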
13638 if (!Param->isInvalidDecl() && 13639 RequireCompleteType(Param->getLocation(), Param->getType(), 13640 diag::err_typecheck_decl_incomplete_type)) { 13641 Param->setInvalidDecl(); 13642 HasInvalidParm = true; 13643 } 13644 13645 // C99 6.9.1p5: If the declarator includes a parameter type list, the 13646 // declaration of each parameter shall include an identifier. 13647 if (CheckParameterNames && Param->getIdentifier() == nullptr && 13648 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 13649 // Diagnose this as an extension in C17 and earlier. 13650 if (!getLangOpts().C2x) 13651 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 13652 } 13653 13654 // C99 6.7.5.3p12: 13655 // If the function declarator is not part of a definition of that 13656 // function, parameters may have incomplete type and may use the [*] 13657 // notation in their sequences of declarator specifiers to specify 13658 // variable length array types. 13659 QualType PType = Param->getOriginalType(); 13660 // FIXME: This diagnostic should point the '[*]' if source-location 13661 // information is added for it. 13662 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 13663 13664 // If the parameter is a c++ class type and it has to be destructed in the 13665 // callee function, declare the destructor so that it can be called by the 13666 // callee function. Do not perform any direct access check on the dtor here. 13667 if (!Param->isInvalidDecl()) { 13668 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 13669 if (!ClassDecl->isInvalidDecl() && 13670 !ClassDecl->hasIrrelevantDestructor() && 13671 !ClassDecl->isDependentContext() && 13672 ClassDecl->isParamDestroyedInCallee()) { 13673 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 13674 MarkFunctionReferenced(Param->getLocation(), Destructor); 13675 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 13676 } 13677 } 13678 } 13679 13680 // Parameters with the pass_object_size attribute only need to be marked 13681 // constant at function definitions. Because we lack information about 13682 // whether we're on a declaration or definition when we're instantiating the 13683 // attribute, we need to check for constness here. 13684 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 13685 if (!Param->getType().isConstQualified()) 13686 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 13687 << Attr->getSpelling() << 1; 13688 13689 // Check for parameter names shadowing fields from the class. 13690 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 13691 // The owning context for the parameter should be the function, but we 13692 // want to see if this function's declaration context is a record. 13693 DeclContext *DC = Param->getDeclContext(); 13694 if (DC && DC->isFunctionOrMethod()) { 13695 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 13696 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 13697 RD, /*DeclIsField*/ false); 13698 } 13699 } 13700 } 13701 13702 return HasInvalidParm; 13703 } 13704 13705 Optional<std::pair<CharUnits, CharUnits>> 13706 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 13707 13708 /// Compute the alignment and offset of the base class object given the 13709 /// derived-to-base cast expression and the alignment and offset of the derived 13710 /// class object. 
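/// For a virtual base only a conservative lower bound of the alignment is
/// known, so the offset is reset to zero at that step.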
13711 static std::pair<CharUnits, CharUnits> 13712 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 13713 CharUnits BaseAlignment, CharUnits Offset, 13714 ASTContext &Ctx) { 13715 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 13716 ++PathI) { 13717 const CXXBaseSpecifier *Base = *PathI; 13718 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 13719 if (Base->isVirtual()) { 13720 // The complete object may have a lower alignment than the non-virtual 13721 // alignment of the base, in which case the base may be misaligned. Choose 13722 // the smaller of the non-virtual alignment and BaseAlignment, which is a 13723 // conservative lower bound of the complete object alignment. 13724 CharUnits NonVirtualAlignment = 13725 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 13726 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 13727 Offset = CharUnits::Zero(); 13728 } else { 13729 const ASTRecordLayout &RL = 13730 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 13731 Offset += RL.getBaseClassOffset(BaseDecl); 13732 } 13733 DerivedType = Base->getType(); 13734 } 13735 13736 return std::make_pair(BaseAlignment, Offset); 13737 } 13738 13739 /// Compute the alignment and offset of a binary additive operator. 13740 static Optional<std::pair<CharUnits, CharUnits>> 13741 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 13742 bool IsSub, ASTContext &Ctx) { 13743 QualType PointeeType = PtrE->getType()->getPointeeType(); 13744 13745 if (!PointeeType->isConstantSizeType()) 13746 return llvm::None; 13747 13748 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 13749 13750 if (!P) 13751 return llvm::None; 13752 13753 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 13754 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 13755 CharUnits Offset = EltSize * IdxRes->getExtValue(); 13756 if (IsSub) 13757 Offset = -Offset; 13758 return std::make_pair(P->first, P->second + Offset); 13759 } 13760 13761 // If the integer expression isn't a constant expression, compute the lower 13762 // bound of the alignment using the alignment and offset of the pointer 13763 // expression and the element size. 13764 return std::make_pair( 13765 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 13766 CharUnits::Zero()); 13767 } 13768 13769 /// This helper function takes an lvalue expression and returns the alignment of 13770 /// a VarDecl and a constant offset from the VarDecl. 
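/// For example, for the lvalue "s.a[2]" (where 's' is a local variable of
/// struct type) this returns the alignment of 's' together with the
/// constant offset of "a[2]" within it, or None if it cannot tell.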
13771 Optional<std::pair<CharUnits, CharUnits>> 13772 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 13773 E = E->IgnoreParens(); 13774 switch (E->getStmtClass()) { 13775 default: 13776 break; 13777 case Stmt::CStyleCastExprClass: 13778 case Stmt::CXXStaticCastExprClass: 13779 case Stmt::ImplicitCastExprClass: { 13780 auto *CE = cast<CastExpr>(E); 13781 const Expr *From = CE->getSubExpr(); 13782 switch (CE->getCastKind()) { 13783 default: 13784 break; 13785 case CK_NoOp: 13786 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13787 case CK_UncheckedDerivedToBase: 13788 case CK_DerivedToBase: { 13789 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13790 if (!P) 13791 break; 13792 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 13793 P->second, Ctx); 13794 } 13795 } 13796 break; 13797 } 13798 case Stmt::ArraySubscriptExprClass: { 13799 auto *ASE = cast<ArraySubscriptExpr>(E); 13800 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 13801 false, Ctx); 13802 } 13803 case Stmt::DeclRefExprClass: { 13804 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 13805 // FIXME: If VD is captured by copy or is an escaping __block variable, 13806 // use the alignment of VD's type. 13807 if (!VD->getType()->isReferenceType()) 13808 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 13809 if (VD->hasInit()) 13810 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 13811 } 13812 break; 13813 } 13814 case Stmt::MemberExprClass: { 13815 auto *ME = cast<MemberExpr>(E); 13816 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 13817 if (!FD || FD->getType()->isReferenceType()) 13818 break; 13819 Optional<std::pair<CharUnits, CharUnits>> P; 13820 if (ME->isArrow()) 13821 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 13822 else 13823 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 13824 if (!P) 13825 break; 13826 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 13827 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 13828 return std::make_pair(P->first, 13829 P->second + CharUnits::fromQuantity(Offset)); 13830 } 13831 case Stmt::UnaryOperatorClass: { 13832 auto *UO = cast<UnaryOperator>(E); 13833 switch (UO->getOpcode()) { 13834 default: 13835 break; 13836 case UO_Deref: 13837 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 13838 } 13839 break; 13840 } 13841 case Stmt::BinaryOperatorClass: { 13842 auto *BO = cast<BinaryOperator>(E); 13843 auto Opcode = BO->getOpcode(); 13844 switch (Opcode) { 13845 default: 13846 break; 13847 case BO_Comma: 13848 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 13849 } 13850 break; 13851 } 13852 } 13853 return llvm::None; 13854 } 13855 13856 /// This helper function takes a pointer expression and returns the alignment of 13857 /// a VarDecl and a constant offset from the VarDecl. 
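/// For example, for the pointer expression "&s.a[2]" this strips the
/// address-of operator and defers to the lvalue form above.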
13858 Optional<std::pair<CharUnits, CharUnits>> 13859 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 13860 E = E->IgnoreParens(); 13861 switch (E->getStmtClass()) { 13862 default: 13863 break; 13864 case Stmt::CStyleCastExprClass: 13865 case Stmt::CXXStaticCastExprClass: 13866 case Stmt::ImplicitCastExprClass: { 13867 auto *CE = cast<CastExpr>(E); 13868 const Expr *From = CE->getSubExpr(); 13869 switch (CE->getCastKind()) { 13870 default: 13871 break; 13872 case CK_NoOp: 13873 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13874 case CK_ArrayToPointerDecay: 13875 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13876 case CK_UncheckedDerivedToBase: 13877 case CK_DerivedToBase: { 13878 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13879 if (!P) 13880 break; 13881 return getDerivedToBaseAlignmentAndOffset( 13882 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 13883 } 13884 } 13885 break; 13886 } 13887 case Stmt::CXXThisExprClass: { 13888 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 13889 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 13890 return std::make_pair(Alignment, CharUnits::Zero()); 13891 } 13892 case Stmt::UnaryOperatorClass: { 13893 auto *UO = cast<UnaryOperator>(E); 13894 if (UO->getOpcode() == UO_AddrOf) 13895 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 13896 break; 13897 } 13898 case Stmt::BinaryOperatorClass: { 13899 auto *BO = cast<BinaryOperator>(E); 13900 auto Opcode = BO->getOpcode(); 13901 switch (Opcode) { 13902 default: 13903 break; 13904 case BO_Add: 13905 case BO_Sub: { 13906 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 13907 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 13908 std::swap(LHS, RHS); 13909 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 13910 Ctx); 13911 } 13912 case BO_Comma: 13913 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 13914 } 13915 break; 13916 } 13917 } 13918 return llvm::None; 13919 } 13920 13921 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 13922 // See if we can compute the alignment of a VarDecl and an offset from it. 13923 Optional<std::pair<CharUnits, CharUnits>> P = 13924 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 13925 13926 if (P) 13927 return P->first.alignmentAtOffset(P->second); 13928 13929 // If that failed, return the type's alignment. 13930 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 13931 } 13932 13933 /// CheckCastAlign - Implements -Wcast-align, which warns when a 13934 /// pointer cast increases the alignment requirements. 13935 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 13936 // This is actually a lot of work to potentially be doing on every 13937 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 13938 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 13939 return; 13940 13941 // Ignore dependent types. 13942 if (T->isDependentType() || Op->getType()->isDependentType()) 13943 return; 13944 13945 // Require that the destination be a pointer type. 13946 const PointerType *DestPtr = T->getAs<PointerType>(); 13947 if (!DestPtr) return; 13948 13949 // If the destination has alignment 1, we're done. 
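// For example, a cast to "char *" or "void *" can never increase the
// alignment requirement, so such casts are never diagnosed.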
13950 QualType DestPointee = DestPtr->getPointeeType(); 13951 if (DestPointee->isIncompleteType()) return; 13952 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 13953 if (DestAlign.isOne()) return; 13954 13955 // Require that the source be a pointer type. 13956 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 13957 if (!SrcPtr) return; 13958 QualType SrcPointee = SrcPtr->getPointeeType(); 13959 13960 // Explicitly allow casts from cv void*. We already implicitly 13961 // allowed casts to cv void*, since they have alignment 1. 13962 // Also allow casts involving incomplete types, which implicitly 13963 // includes 'void'. 13964 if (SrcPointee->isIncompleteType()) return; 13965 13966 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 13967 13968 if (SrcAlign >= DestAlign) return; 13969 13970 Diag(TRange.getBegin(), diag::warn_cast_align) 13971 << Op->getType() << T 13972 << static_cast<unsigned>(SrcAlign.getQuantity()) 13973 << static_cast<unsigned>(DestAlign.getQuantity()) 13974 << TRange << Op->getSourceRange(); 13975 } 13976 13977 /// Check whether this array fits the idiom of a size-one tail padded 13978 /// array member of a struct. 13979 /// 13980 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 13981 /// commonly used to emulate flexible arrays in C89 code. 13982 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 13983 const NamedDecl *ND) { 13984 if (Size != 1 || !ND) return false; 13985 13986 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 13987 if (!FD) return false; 13988 13989 // Don't consider sizes resulting from macro expansions or template argument 13990 // substitution to form C89 tail-padded arrays. 13991 13992 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 13993 while (TInfo) { 13994 TypeLoc TL = TInfo->getTypeLoc(); 13995 // Look through typedefs. 13996 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 13997 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 13998 TInfo = TDL->getTypeSourceInfo(); 13999 continue; 14000 } 14001 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 14002 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 14003 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 14004 return false; 14005 } 14006 break; 14007 } 14008 14009 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 14010 if (!RD) return false; 14011 if (RD->isUnion()) return false; 14012 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 14013 if (!CRD->isStandardLayout()) return false; 14014 } 14015 14016 // See if this is the last field decl in the record. 14017 const Decl *D = FD; 14018 while ((D = D->getNextDeclInContext())) 14019 if (isa<FieldDecl>(D)) 14020 return false; 14021 return true; 14022 } 14023 14024 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 14025 const ArraySubscriptExpr *ASE, 14026 bool AllowOnePastEnd, bool IndexNegated) { 14027 // Already diagnosed by the constant evaluator. 
14028 if (isConstantEvaluated()) 14029 return; 14030 14031 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 14032 if (IndexExpr->isValueDependent()) 14033 return; 14034 14035 const Type *EffectiveType = 14036 BaseExpr->getType()->getPointeeOrArrayElementType(); 14037 BaseExpr = BaseExpr->IgnoreParenCasts(); 14038 const ConstantArrayType *ArrayTy = 14039 Context.getAsConstantArrayType(BaseExpr->getType()); 14040 14041 if (!ArrayTy) 14042 return; 14043 14044 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 14045 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 14046 return; 14047 14048 Expr::EvalResult Result; 14049 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 14050 return; 14051 14052 llvm::APSInt index = Result.Val.getInt(); 14053 if (IndexNegated) 14054 index = -index; 14055 14056 const NamedDecl *ND = nullptr; 14057 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14058 ND = DRE->getDecl(); 14059 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14060 ND = ME->getMemberDecl(); 14061 14062 if (index.isUnsigned() || !index.isNegative()) { 14063 // It is possible that the type of the base expression after 14064 // IgnoreParenCasts is incomplete, even though the type of the base 14065 // expression before IgnoreParenCasts is complete (see PR39746 for an 14066 // example). In this case we have no information about whether the array 14067 // access exceeds the array bounds. However we can still diagnose an array 14068 // access which precedes the array bounds. 14069 if (BaseType->isIncompleteType()) 14070 return; 14071 14072 llvm::APInt size = ArrayTy->getSize(); 14073 if (!size.isStrictlyPositive()) 14074 return; 14075 14076 if (BaseType != EffectiveType) { 14077 // Make sure we're comparing apples to apples when comparing index to size 14078 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 14079 uint64_t array_typesize = Context.getTypeSize(BaseType); 14080 // Handle ptrarith_typesize being zero, such as when casting to void* 14081 if (!ptrarith_typesize) ptrarith_typesize = 1; 14082 if (ptrarith_typesize != array_typesize) { 14083 // There's a cast to a different size type involved 14084 uint64_t ratio = array_typesize / ptrarith_typesize; 14085 // TODO: Be smarter about handling cases where array_typesize is not a 14086 // multiple of ptrarith_typesize 14087 if (ptrarith_typesize * ratio == array_typesize) 14088 size *= llvm::APInt(size.getBitWidth(), ratio); 14089 } 14090 } 14091 14092 if (size.getBitWidth() > index.getBitWidth()) 14093 index = index.zext(size.getBitWidth()); 14094 else if (size.getBitWidth() < index.getBitWidth()) 14095 size = size.zext(index.getBitWidth()); 14096 14097 // For array subscripting the index must be less than size, but for pointer 14098 // arithmetic also allow the index (offset) to be equal to size since 14099 // computing the next address after the end of the array is legal and 14100 // commonly done e.g. in C++ iterators and range-based for loops. 14101 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 14102 return; 14103 14104 // Also don't warn for arrays of size 1 which are members of some 14105 // structure. These are often used to approximate flexible arrays in C89 14106 // code. 14107 if (IsTailPaddedMemberArray(*this, size, ND)) 14108 return; 14109 14110 // Suppress the warning if the subscript expression (as identified by the 14111 // ']' location) and the index expression are both from macro expansions 14112 // within a system header. 
14113 if (ASE) { 14114 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 14115 ASE->getRBracketLoc()); 14116 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 14117 SourceLocation IndexLoc = 14118 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 14119 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 14120 return; 14121 } 14122 } 14123 14124 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 14125 if (ASE) 14126 DiagID = diag::warn_array_index_exceeds_bounds; 14127 14128 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14129 PDiag(DiagID) << index.toString(10, true) 14130 << size.toString(10, true) 14131 << (unsigned)size.getLimitedValue(~0U) 14132 << IndexExpr->getSourceRange()); 14133 } else { 14134 unsigned DiagID = diag::warn_array_index_precedes_bounds; 14135 if (!ASE) { 14136 DiagID = diag::warn_ptr_arith_precedes_bounds; 14137 if (index.isNegative()) index = -index; 14138 } 14139 14140 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14141 PDiag(DiagID) << index.toString(10, true) 14142 << IndexExpr->getSourceRange()); 14143 } 14144 14145 if (!ND) { 14146 // Try harder to find a NamedDecl to point at in the note. 14147 while (const ArraySubscriptExpr *ASE = 14148 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 14149 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 14150 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14151 ND = DRE->getDecl(); 14152 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14153 ND = ME->getMemberDecl(); 14154 } 14155 14156 if (ND) 14157 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 14158 PDiag(diag::note_array_declared_here) << ND); 14159 } 14160 14161 void Sema::CheckArrayAccess(const Expr *expr) { 14162 int AllowOnePastEnd = 0; 14163 while (expr) { 14164 expr = expr->IgnoreParenImpCasts(); 14165 switch (expr->getStmtClass()) { 14166 case Stmt::ArraySubscriptExprClass: { 14167 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 14168 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 14169 AllowOnePastEnd > 0); 14170 expr = ASE->getBase(); 14171 break; 14172 } 14173 case Stmt::MemberExprClass: { 14174 expr = cast<MemberExpr>(expr)->getBase(); 14175 break; 14176 } 14177 case Stmt::OMPArraySectionExprClass: { 14178 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 14179 if (ASE->getLowerBound()) 14180 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 14181 /*ASE=*/nullptr, AllowOnePastEnd > 0); 14182 return; 14183 } 14184 case Stmt::UnaryOperatorClass: { 14185 // Only unwrap the * and & unary operators 14186 const UnaryOperator *UO = cast<UnaryOperator>(expr); 14187 expr = UO->getSubExpr(); 14188 switch (UO->getOpcode()) { 14189 case UO_AddrOf: 14190 AllowOnePastEnd++; 14191 break; 14192 case UO_Deref: 14193 AllowOnePastEnd--; 14194 break; 14195 default: 14196 return; 14197 } 14198 break; 14199 } 14200 case Stmt::ConditionalOperatorClass: { 14201 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 14202 if (const Expr *lhs = cond->getLHS()) 14203 CheckArrayAccess(lhs); 14204 if (const Expr *rhs = cond->getRHS()) 14205 CheckArrayAccess(rhs); 14206 return; 14207 } 14208 case Stmt::CXXOperatorCallExprClass: { 14209 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 14210 for (const auto *Arg : OCE->arguments()) 14211 CheckArrayAccess(Arg); 14212 return; 14213 } 14214 default: 14215 return; 14216 } 14217 } 14218 } 14219 14220 //===--- CHECK: Objective-C retain cycles ----------------------------------// 14221 14222 namespace { 14223 14224 struct RetainCycleOwner { 
14225 VarDecl *Variable = nullptr; 14226 SourceRange Range; 14227 SourceLocation Loc; 14228 bool Indirect = false; 14229 14230 RetainCycleOwner() = default; 14231 14232 void setLocsFrom(Expr *e) { 14233 Loc = e->getExprLoc(); 14234 Range = e->getSourceRange(); 14235 } 14236 }; 14237 14238 } // namespace 14239 14240 /// Consider whether capturing the given variable can possibly lead to 14241 /// a retain cycle. 14242 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 14243 // In ARC, it's captured strongly iff the variable has __strong 14244 // lifetime. In MRR, it's captured strongly if the variable is 14245 // __block and has an appropriate type. 14246 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14247 return false; 14248 14249 owner.Variable = var; 14250 if (ref) 14251 owner.setLocsFrom(ref); 14252 return true; 14253 } 14254 14255 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 14256 while (true) { 14257 e = e->IgnoreParens(); 14258 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 14259 switch (cast->getCastKind()) { 14260 case CK_BitCast: 14261 case CK_LValueBitCast: 14262 case CK_LValueToRValue: 14263 case CK_ARCReclaimReturnedObject: 14264 e = cast->getSubExpr(); 14265 continue; 14266 14267 default: 14268 return false; 14269 } 14270 } 14271 14272 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 14273 ObjCIvarDecl *ivar = ref->getDecl(); 14274 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14275 return false; 14276 14277 // Try to find a retain cycle in the base. 14278 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 14279 return false; 14280 14281 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 14282 owner.Indirect = true; 14283 return true; 14284 } 14285 14286 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 14287 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 14288 if (!var) return false; 14289 return considerVariable(var, ref, owner); 14290 } 14291 14292 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 14293 if (member->isArrow()) return false; 14294 14295 // Don't count this as an indirect ownership. 14296 e = member->getBase(); 14297 continue; 14298 } 14299 14300 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 14301 // Only pay attention to pseudo-objects on property references. 14302 ObjCPropertyRefExpr *pre 14303 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 14304 ->IgnoreParens()); 14305 if (!pre) return false; 14306 if (pre->isImplicitProperty()) return false; 14307 ObjCPropertyDecl *property = pre->getExplicitProperty(); 14308 if (!property->isRetaining() && 14309 !(property->getPropertyIvarDecl() && 14310 property->getPropertyIvarDecl()->getType() 14311 .getObjCLifetime() == Qualifiers::OCL_Strong)) 14312 return false; 14313 14314 owner.Indirect = true; 14315 if (pre->isSuperReceiver()) { 14316 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 14317 if (!owner.Variable) 14318 return false; 14319 owner.Loc = pre->getLocation(); 14320 owner.Range = pre->getSourceRange(); 14321 return true; 14322 } 14323 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 14324 ->getSourceExpr()); 14325 continue; 14326 } 14327 14328 // Array ivars? 
14329 14330 return false; 14331 } 14332 } 14333 14334 namespace { 14335 14336 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 14337 ASTContext &Context; 14338 VarDecl *Variable; 14339 Expr *Capturer = nullptr; 14340 bool VarWillBeReased = false; 14341 14342 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 14343 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 14344 Context(Context), Variable(variable) {} 14345 14346 void VisitDeclRefExpr(DeclRefExpr *ref) { 14347 if (ref->getDecl() == Variable && !Capturer) 14348 Capturer = ref; 14349 } 14350 14351 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 14352 if (Capturer) return; 14353 Visit(ref->getBase()); 14354 if (Capturer && ref->isFreeIvar()) 14355 Capturer = ref; 14356 } 14357 14358 void VisitBlockExpr(BlockExpr *block) { 14359 // Look inside nested blocks 14360 if (block->getBlockDecl()->capturesVariable(Variable)) 14361 Visit(block->getBlockDecl()->getBody()); 14362 } 14363 14364 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 14365 if (Capturer) return; 14366 if (OVE->getSourceExpr()) 14367 Visit(OVE->getSourceExpr()); 14368 } 14369 14370 void VisitBinaryOperator(BinaryOperator *BinOp) { 14371 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 14372 return; 14373 Expr *LHS = BinOp->getLHS(); 14374 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 14375 if (DRE->getDecl() != Variable) 14376 return; 14377 if (Expr *RHS = BinOp->getRHS()) { 14378 RHS = RHS->IgnoreParenCasts(); 14379 Optional<llvm::APSInt> Value; 14380 VarWillBeReased = 14381 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 14382 *Value == 0); 14383 } 14384 } 14385 } 14386 }; 14387 14388 } // namespace 14389 14390 /// Check whether the given argument is a block which captures a 14391 /// variable. 14392 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 14393 assert(owner.Variable && owner.Loc.isValid()); 14394 14395 e = e->IgnoreParenCasts(); 14396 14397 // Look through [^{...} copy] and Block_copy(^{...}). 14398 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 14399 Selector Cmd = ME->getSelector(); 14400 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 14401 e = ME->getInstanceReceiver(); 14402 if (!e) 14403 return nullptr; 14404 e = e->IgnoreParenCasts(); 14405 } 14406 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 14407 if (CE->getNumArgs() == 1) { 14408 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 14409 if (Fn) { 14410 const IdentifierInfo *FnI = Fn->getIdentifier(); 14411 if (FnI && FnI->isStr("_Block_copy")) { 14412 e = CE->getArg(0)->IgnoreParenCasts(); 14413 } 14414 } 14415 } 14416 } 14417 14418 BlockExpr *block = dyn_cast<BlockExpr>(e); 14419 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 14420 return nullptr; 14421 14422 FindCaptureVisitor visitor(S.Context, owner.Variable); 14423 visitor.Visit(block->getBlockDecl()->getBody()); 14424 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 14425 } 14426 14427 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 14428 RetainCycleOwner &owner) { 14429 assert(capturer); 14430 assert(owner.Variable && owner.Loc.isValid()); 14431 14432 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 14433 << owner.Variable << capturer->getSourceRange(); 14434 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 14435 << owner.Indirect << owner.Range; 14436 } 14437 14438 /// Check for a keyword selector that starts with the word 'add' or 14439 /// 'set'. 14440 static bool isSetterLikeSelector(Selector sel) { 14441 if (sel.isUnarySelector()) return false; 14442 14443 StringRef str = sel.getNameForSlot(0); 14444 while (!str.empty() && str.front() == '_') str = str.substr(1); 14445 if (str.startswith("set")) 14446 str = str.substr(3); 14447 else if (str.startswith("add")) { 14448 // Specially allow 'addOperationWithBlock:'. 14449 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 14450 return false; 14451 str = str.substr(3); 14452 } 14453 else 14454 return false; 14455 14456 if (str.empty()) return true; 14457 return !isLowercase(str.front()); 14458 } 14459 14460 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 14461 ObjCMessageExpr *Message) { 14462 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 14463 Message->getReceiverInterface(), 14464 NSAPI::ClassId_NSMutableArray); 14465 if (!IsMutableArray) { 14466 return None; 14467 } 14468 14469 Selector Sel = Message->getSelector(); 14470 14471 Optional<NSAPI::NSArrayMethodKind> MKOpt = 14472 S.NSAPIObj->getNSArrayMethodKind(Sel); 14473 if (!MKOpt) { 14474 return None; 14475 } 14476 14477 NSAPI::NSArrayMethodKind MK = *MKOpt; 14478 14479 switch (MK) { 14480 case NSAPI::NSMutableArr_addObject: 14481 case NSAPI::NSMutableArr_insertObjectAtIndex: 14482 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 14483 return 0; 14484 case NSAPI::NSMutableArr_replaceObjectAtIndex: 14485 return 1; 14486 14487 default: 14488 return None; 14489 } 14490 14491 return None; 14492 } 14493 14494 static 14495 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 14496 ObjCMessageExpr *Message) { 14497 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 14498 Message->getReceiverInterface(), 14499 NSAPI::ClassId_NSMutableDictionary); 14500 if (!IsMutableDictionary) { 14501 return None; 14502 } 14503 14504 Selector Sel = Message->getSelector(); 14505 14506 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 14507 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 14508 if (!MKOpt) { 14509 return None; 14510 } 14511 14512 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 14513 14514 switch (MK) { 14515 case NSAPI::NSMutableDict_setObjectForKey: 14516 case NSAPI::NSMutableDict_setValueForKey: 14517 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 14518 return 0; 14519 14520 default: 14521 return None; 14522 } 14523 14524 return None; 14525 } 14526 14527 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 14528 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 14529 Message->getReceiverInterface(), 14530 NSAPI::ClassId_NSMutableSet); 14531 14532 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 14533 Message->getReceiverInterface(), 14534 NSAPI::ClassId_NSMutableOrderedSet); 14535 if (!IsMutableSet && !IsMutableOrderedSet) { 14536 return None; 14537 } 14538 14539 Selector Sel = Message->getSelector(); 14540 14541 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 14542 if (!MKOpt) 
{ 14543 return None; 14544 } 14545 14546 NSAPI::NSSetMethodKind MK = *MKOpt; 14547 14548 switch (MK) { 14549 case NSAPI::NSMutableSet_addObject: 14550 case NSAPI::NSOrderedSet_setObjectAtIndex: 14551 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 14552 case NSAPI::NSOrderedSet_insertObjectAtIndex: 14553 return 0; 14554 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 14555 return 1; 14556 } 14557 14558 return None; 14559 } 14560 14561 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 14562 if (!Message->isInstanceMessage()) { 14563 return; 14564 } 14565 14566 Optional<int> ArgOpt; 14567 14568 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 14569 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 14570 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 14571 return; 14572 } 14573 14574 int ArgIndex = *ArgOpt; 14575 14576 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 14577 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 14578 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 14579 } 14580 14581 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 14582 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14583 if (ArgRE->isObjCSelfExpr()) { 14584 Diag(Message->getSourceRange().getBegin(), 14585 diag::warn_objc_circular_container) 14586 << ArgRE->getDecl() << StringRef("'super'"); 14587 } 14588 } 14589 } else { 14590 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 14591 14592 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 14593 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 14594 } 14595 14596 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 14597 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14598 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 14599 ValueDecl *Decl = ReceiverRE->getDecl(); 14600 Diag(Message->getSourceRange().getBegin(), 14601 diag::warn_objc_circular_container) 14602 << Decl << Decl; 14603 if (!ArgRE->isObjCSelfExpr()) { 14604 Diag(Decl->getLocation(), 14605 diag::note_objc_circular_container_declared_here) 14606 << Decl; 14607 } 14608 } 14609 } 14610 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 14611 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 14612 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 14613 ObjCIvarDecl *Decl = IvarRE->getDecl(); 14614 Diag(Message->getSourceRange().getBegin(), 14615 diag::warn_objc_circular_container) 14616 << Decl << Decl; 14617 Diag(Decl->getLocation(), 14618 diag::note_objc_circular_container_declared_here) 14619 << Decl; 14620 } 14621 } 14622 } 14623 } 14624 } 14625 14626 /// Check a message send to see if it's likely to cause a retain cycle. 14627 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 14628 // Only check instance methods whose selector looks like a setter. 14629 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 14630 return; 14631 14632 // Try to find a variable that the receiver is strongly owned by. 
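// For example (with illustrative selector names), in
//   [self setHandler:^{ [self fire]; }];
// the receiver is owned by 'self' and the block argument captures 'self'
// strongly, which is likely to create a retain cycle.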
14633 RetainCycleOwner owner; 14634 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 14635 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 14636 return; 14637 } else { 14638 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 14639 owner.Variable = getCurMethodDecl()->getSelfDecl(); 14640 owner.Loc = msg->getSuperLoc(); 14641 owner.Range = msg->getSuperLoc(); 14642 } 14643 14644 // Check whether the receiver is captured by any of the arguments. 14645 const ObjCMethodDecl *MD = msg->getMethodDecl(); 14646 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 14647 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 14648 // noescape blocks should not be retained by the method. 14649 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 14650 continue; 14651 return diagnoseRetainCycle(*this, capturer, owner); 14652 } 14653 } 14654 } 14655 14656 /// Check a property assign to see if it's likely to cause a retain cycle. 14657 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 14658 RetainCycleOwner owner; 14659 if (!findRetainCycleOwner(*this, receiver, owner)) 14660 return; 14661 14662 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 14663 diagnoseRetainCycle(*this, capturer, owner); 14664 } 14665 14666 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 14667 RetainCycleOwner Owner; 14668 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 14669 return; 14670 14671 // Because we don't have an expression for the variable, we have to set the 14672 // location explicitly here. 14673 Owner.Loc = Var->getLocation(); 14674 Owner.Range = Var->getSourceRange(); 14675 14676 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 14677 diagnoseRetainCycle(*this, Capturer, Owner); 14678 } 14679 14680 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 14681 Expr *RHS, bool isProperty) { 14682 // Check if RHS is an Objective-C object literal, which also can get 14683 // immediately zapped in a weak reference. Note that we explicitly 14684 // allow ObjCStringLiterals, since those are designed to never really die. 14685 RHS = RHS->IgnoreParenImpCasts(); 14686 14687 // This enum needs to match with the 'select' in 14688 // warn_objc_arc_literal_assign (off-by-1). 14689 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 14690 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 14691 return false; 14692 14693 S.Diag(Loc, diag::warn_arc_literal_assign) 14694 << (unsigned) Kind 14695 << (isProperty ? 0 : 1) 14696 << RHS->getSourceRange(); 14697 14698 return true; 14699 } 14700 14701 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 14702 Qualifiers::ObjCLifetime LT, 14703 Expr *RHS, bool isProperty) { 14704 // Strip off any implicit cast added to get to the one ARC-specific. 14705 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14706 if (cast->getCastKind() == CK_ARCConsumeObject) { 14707 S.Diag(Loc, diag::warn_arc_retained_assign) 14708 << (LT == Qualifiers::OCL_ExplicitNone) 14709 << (isProperty ? 
0 : 1) 14710 << RHS->getSourceRange(); 14711 return true; 14712 } 14713 RHS = cast->getSubExpr(); 14714 } 14715 14716 if (LT == Qualifiers::OCL_Weak && 14717 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 14718 return true; 14719 14720 return false; 14721 } 14722 14723 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 14724 QualType LHS, Expr *RHS) { 14725 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 14726 14727 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 14728 return false; 14729 14730 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 14731 return true; 14732 14733 return false; 14734 } 14735 14736 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 14737 Expr *LHS, Expr *RHS) { 14738 QualType LHSType; 14739 // PropertyRef on LHS type need be directly obtained from 14740 // its declaration as it has a PseudoType. 14741 ObjCPropertyRefExpr *PRE 14742 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 14743 if (PRE && !PRE->isImplicitProperty()) { 14744 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14745 if (PD) 14746 LHSType = PD->getType(); 14747 } 14748 14749 if (LHSType.isNull()) 14750 LHSType = LHS->getType(); 14751 14752 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 14753 14754 if (LT == Qualifiers::OCL_Weak) { 14755 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 14756 getCurFunction()->markSafeWeakUse(LHS); 14757 } 14758 14759 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 14760 return; 14761 14762 // FIXME. Check for other life times. 14763 if (LT != Qualifiers::OCL_None) 14764 return; 14765 14766 if (PRE) { 14767 if (PRE->isImplicitProperty()) 14768 return; 14769 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14770 if (!PD) 14771 return; 14772 14773 unsigned Attributes = PD->getPropertyAttributes(); 14774 if (Attributes & ObjCPropertyAttribute::kind_assign) { 14775 // when 'assign' attribute was not explicitly specified 14776 // by user, ignore it and rely on property type itself 14777 // for lifetime info. 14778 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 14779 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 14780 LHSType->isObjCRetainableType()) 14781 return; 14782 14783 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14784 if (cast->getCastKind() == CK_ARCConsumeObject) { 14785 Diag(Loc, diag::warn_arc_retained_property_assign) 14786 << RHS->getSourceRange(); 14787 return; 14788 } 14789 RHS = cast->getSubExpr(); 14790 } 14791 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 14792 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 14793 return; 14794 } 14795 } 14796 } 14797 14798 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 14799 14800 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 14801 SourceLocation StmtLoc, 14802 const NullStmt *Body) { 14803 // Do not warn if the body is a macro that expands to nothing, e.g: 14804 // 14805 // #define CALL(x) 14806 // if (condition) 14807 // CALL(0); 14808 if (Body->hasLeadingEmptyMacro()) 14809 return false; 14810 14811 // Get line numbers of statement and body. 
14812 bool StmtLineInvalid; 14813 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 14814 &StmtLineInvalid); 14815 if (StmtLineInvalid) 14816 return false; 14817 14818 bool BodyLineInvalid; 14819 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 14820 &BodyLineInvalid); 14821 if (BodyLineInvalid) 14822 return false; 14823 14824 // Warn if null statement and body are on the same line. 14825 if (StmtLine != BodyLine) 14826 return false; 14827 14828 return true; 14829 } 14830 14831 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 14832 const Stmt *Body, 14833 unsigned DiagID) { 14834 // Since this is a syntactic check, don't emit diagnostic for template 14835 // instantiations, this just adds noise. 14836 if (CurrentInstantiationScope) 14837 return; 14838 14839 // The body should be a null statement. 14840 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14841 if (!NBody) 14842 return; 14843 14844 // Do the usual checks. 14845 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14846 return; 14847 14848 Diag(NBody->getSemiLoc(), DiagID); 14849 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14850 } 14851 14852 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 14853 const Stmt *PossibleBody) { 14854 assert(!CurrentInstantiationScope); // Ensured by caller 14855 14856 SourceLocation StmtLoc; 14857 const Stmt *Body; 14858 unsigned DiagID; 14859 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 14860 StmtLoc = FS->getRParenLoc(); 14861 Body = FS->getBody(); 14862 DiagID = diag::warn_empty_for_body; 14863 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 14864 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 14865 Body = WS->getBody(); 14866 DiagID = diag::warn_empty_while_body; 14867 } else 14868 return; // Neither `for' nor `while'. 14869 14870 // The body should be a null statement. 14871 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14872 if (!NBody) 14873 return; 14874 14875 // Skip expensive checks if diagnostic is disabled. 14876 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 14877 return; 14878 14879 // Do the usual checks. 14880 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14881 return; 14882 14883 // `for(...);' and `while(...);' are popular idioms, so in order to keep 14884 // noise level low, emit diagnostics only if for/while is followed by a 14885 // CompoundStmt, e.g.: 14886 // for (int i = 0; i < n; i++); 14887 // { 14888 // a(i); 14889 // } 14890 // or if for/while is followed by a statement with more indentation 14891 // than for/while itself: 14892 // for (int i = 0; i < n; i++); 14893 // a(i); 14894 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 14895 if (!ProbableTypo) { 14896 bool BodyColInvalid; 14897 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 14898 PossibleBody->getBeginLoc(), &BodyColInvalid); 14899 if (BodyColInvalid) 14900 return; 14901 14902 bool StmtColInvalid; 14903 unsigned StmtCol = 14904 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 14905 if (StmtColInvalid) 14906 return; 14907 14908 if (BodyCol > StmtCol) 14909 ProbableTypo = true; 14910 } 14911 14912 if (ProbableTypo) { 14913 Diag(NBody->getSemiLoc(), DiagID); 14914 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14915 } 14916 } 14917 14918 //===--- CHECK: Warn on self move with std::move. -------------------------===// 14919 14920 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
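/// For example, "x = std::move(x)" and "a.f = std::move(a.f)" are both
/// diagnosed here.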
14921 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
14922                             SourceLocation OpLoc) {
14923   if (Diags.isIgnored(diag::warn_self_move, OpLoc))
14924     return;
14925
14926   if (inTemplateInstantiation())
14927     return;
14928
14929   // Strip parens and casts away.
14930   LHSExpr = LHSExpr->IgnoreParenImpCasts();
14931   RHSExpr = RHSExpr->IgnoreParenImpCasts();
14932
14933   // Check for a call expression.
14934   const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
14935   if (!CE || CE->getNumArgs() != 1)
14936     return;
14937
14938   // Check for a call to std::move.
14939   if (!CE->isCallToStdMove())
14940     return;
14941
14942   // Get the argument from std::move.
14943   RHSExpr = CE->getArg(0);
14944
14945   const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
14946   const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
14947
14948   // Two DeclRefExpr's, check that the decls are the same.
14949   if (LHSDeclRef && RHSDeclRef) {
14950     if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
14951       return;
14952     if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
14953         RHSDeclRef->getDecl()->getCanonicalDecl())
14954       return;
14955
14956     Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
14957                                       << LHSExpr->getSourceRange()
14958                                       << RHSExpr->getSourceRange();
14959     return;
14960   }
14961
14962   // Member variables require a different approach to check for self moves.
14963   // MemberExpr's are the same if every nested MemberExpr refers to the same
14964   // Decl and the base Expr's are DeclRefExpr's with the same Decl, or the
14965   // base Expr's are CXXThisExpr's.
14966   const Expr *LHSBase = LHSExpr;
14967   const Expr *RHSBase = RHSExpr;
14968   const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
14969   const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
14970   if (!LHSME || !RHSME)
14971     return;
14972
14973   while (LHSME && RHSME) {
14974     if (LHSME->getMemberDecl()->getCanonicalDecl() !=
14975         RHSME->getMemberDecl()->getCanonicalDecl())
14976       return;
14977
14978     LHSBase = LHSME->getBase();
14979     RHSBase = RHSME->getBase();
14980     LHSME = dyn_cast<MemberExpr>(LHSBase);
14981     RHSME = dyn_cast<MemberExpr>(RHSBase);
14982   }
14983
14984   LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
14985   RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
14986   if (LHSDeclRef && RHSDeclRef) {
14987     if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
14988       return;
14989     if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
14990         RHSDeclRef->getDecl()->getCanonicalDecl())
14991       return;
14992
14993     Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
14994                                       << LHSExpr->getSourceRange()
14995                                       << RHSExpr->getSourceRange();
14996     return;
14997   }
14998
14999   if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
15000     Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
15001                                       << LHSExpr->getSourceRange()
15002                                       << RHSExpr->getSourceRange();
15003 }
15004
15005 //===--- Layout compatibility ----------------------------------------------//
15006
15007 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
15008
15009 /// Check if two enumeration types are layout-compatible.
15010 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
15011   // C++11 [dcl.enum] p8:
15012   // Two enumeration types are layout-compatible if they have the same
15013   // underlying type.
15014   return ED1->isComplete() && ED2->isComplete() &&
15015          C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
15016 }
15017
15018 /// Check if two fields are layout-compatible.
15019 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 15020 FieldDecl *Field2) { 15021 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 15022 return false; 15023 15024 if (Field1->isBitField() != Field2->isBitField()) 15025 return false; 15026 15027 if (Field1->isBitField()) { 15028 // Make sure that the bit-fields are the same length. 15029 unsigned Bits1 = Field1->getBitWidthValue(C); 15030 unsigned Bits2 = Field2->getBitWidthValue(C); 15031 15032 if (Bits1 != Bits2) 15033 return false; 15034 } 15035 15036 return true; 15037 } 15038 15039 /// Check if two standard-layout structs are layout-compatible. 15040 /// (C++11 [class.mem] p17) 15041 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 15042 RecordDecl *RD2) { 15043 // If both records are C++ classes, check that base classes match. 15044 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 15045 // If one of the records is a CXXRecordDecl, we are in C++ mode, 15046 // so the other one must be a CXXRecordDecl too. 15047 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 15048 // Check the number of base classes. 15049 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 15050 return false; 15051 15052 // Check the base classes. 15053 for (CXXRecordDecl::base_class_const_iterator 15054 Base1 = D1CXX->bases_begin(), 15055 BaseEnd1 = D1CXX->bases_end(), 15056 Base2 = D2CXX->bases_begin(); 15057 Base1 != BaseEnd1; 15058 ++Base1, ++Base2) { 15059 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 15060 return false; 15061 } 15062 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 15063 // If only RD2 is a C++ class, it should have zero base classes. 15064 if (D2CXX->getNumBases() > 0) 15065 return false; 15066 } 15067 15068 // Check the fields. 15069 RecordDecl::field_iterator Field2 = RD2->field_begin(), 15070 Field2End = RD2->field_end(), 15071 Field1 = RD1->field_begin(), 15072 Field1End = RD1->field_end(); 15073 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 15074 if (!isLayoutCompatible(C, *Field1, *Field2)) 15075 return false; 15076 } 15077 if (Field1 != Field1End || Field2 != Field2End) 15078 return false; 15079 15080 return true; 15081 } 15082 15083 /// Check if two standard-layout unions are layout-compatible. 15084 /// (C++11 [class.mem] p18) 15085 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 15086 RecordDecl *RD2) { 15087 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 15088 for (auto *Field2 : RD2->fields()) 15089 UnmatchedFields.insert(Field2); 15090 15091 for (auto *Field1 : RD1->fields()) { 15092 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 15093 I = UnmatchedFields.begin(), 15094 E = UnmatchedFields.end(); 15095 15096 for ( ; I != E; ++I) { 15097 if (isLayoutCompatible(C, Field1, *I)) { 15098 bool Result = UnmatchedFields.erase(*I); 15099 (void) Result; 15100 assert(Result); 15101 break; 15102 } 15103 } 15104 if (I == E) 15105 return false; 15106 } 15107 15108 return UnmatchedFields.empty(); 15109 } 15110 15111 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 15112 RecordDecl *RD2) { 15113 if (RD1->isUnion() != RD2->isUnion()) 15114 return false; 15115 15116 if (RD1->isUnion()) 15117 return isLayoutCompatibleUnion(C, RD1, RD2); 15118 else 15119 return isLayoutCompatibleStruct(C, RD1, RD2); 15120 } 15121 15122 /// Check if two types are layout-compatible in the C++11 sense.
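/// For example (illustrative), the standard-layout structs
/// 'struct A { int i; char c; };' and 'struct B { int x; char y; };' are
/// layout-compatible here, whereas giving either one a virtual function
/// (making it non-standard-layout) or changing a field's type breaks
/// compatibility.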
15123 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 15124 if (T1.isNull() || T2.isNull()) 15125 return false; 15126 15127 // C++11 [basic.types] p11: 15128 // If two types T1 and T2 are the same type, then T1 and T2 are 15129 // layout-compatible types. 15130 if (C.hasSameType(T1, T2)) 15131 return true; 15132 15133 T1 = T1.getCanonicalType().getUnqualifiedType(); 15134 T2 = T2.getCanonicalType().getUnqualifiedType(); 15135 15136 const Type::TypeClass TC1 = T1->getTypeClass(); 15137 const Type::TypeClass TC2 = T2->getTypeClass(); 15138 15139 if (TC1 != TC2) 15140 return false; 15141 15142 if (TC1 == Type::Enum) { 15143 return isLayoutCompatible(C, 15144 cast<EnumType>(T1)->getDecl(), 15145 cast<EnumType>(T2)->getDecl()); 15146 } else if (TC1 == Type::Record) { 15147 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 15148 return false; 15149 15150 return isLayoutCompatible(C, 15151 cast<RecordType>(T1)->getDecl(), 15152 cast<RecordType>(T2)->getDecl()); 15153 } 15154 15155 return false; 15156 } 15157 15158 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 15159 15160 /// Given a type tag expression, find the type tag itself. 15161 /// 15162 /// \param TypeExpr Type tag expression, as it appears in the user's code. 15163 /// 15164 /// \param VD Declaration of an identifier that appears in a type tag. 15165 /// 15166 /// \param MagicValue Type tag magic value. 15167 /// 15168 /// \param isConstantEvaluated whether the evaluation should be performed in 15169 /// a constant context. 15170 /// 15171 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 15172 const ValueDecl **VD, uint64_t *MagicValue, 15173 bool isConstantEvaluated) { 15174 while (true) { 15175 if (!TypeExpr) 15176 return false; 15177 15178 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 15179 15180 switch (TypeExpr->getStmtClass()) { 15181 case Stmt::UnaryOperatorClass: { 15182 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 15183 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 15184 TypeExpr = UO->getSubExpr(); 15185 continue; 15186 } 15187 return false; 15188 } 15189 15190 case Stmt::DeclRefExprClass: { 15191 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 15192 *VD = DRE->getDecl(); 15193 return true; 15194 } 15195 15196 case Stmt::IntegerLiteralClass: { 15197 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 15198 llvm::APInt MagicValueAPInt = IL->getValue(); 15199 if (MagicValueAPInt.getActiveBits() <= 64) { 15200 *MagicValue = MagicValueAPInt.getZExtValue(); 15201 return true; 15202 } else 15203 return false; 15204 } 15205 15206 case Stmt::BinaryConditionalOperatorClass: 15207 case Stmt::ConditionalOperatorClass: { 15208 const AbstractConditionalOperator *ACO = 15209 cast<AbstractConditionalOperator>(TypeExpr); 15210 bool Result; 15211 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 15212 isConstantEvaluated)) { 15213 if (Result) 15214 TypeExpr = ACO->getTrueExpr(); 15215 else 15216 TypeExpr = ACO->getFalseExpr(); 15217 continue; 15218 } 15219 return false; 15220 } 15221 15222 case Stmt::BinaryOperatorClass: { 15223 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 15224 if (BO->getOpcode() == BO_Comma) { 15225 TypeExpr = BO->getRHS(); 15226 continue; 15227 } 15228 return false; 15229 } 15230 15231 default: 15232 return false; 15233 } 15234 } 15235 } 15236 15237 /// Retrieve the C type corresponding to type tag TypeExpr.
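///
/// A type tag is typically either the address of a variable declared with the
/// type_tag_for_datatype attribute or an integral magic value registered via
/// RegisterTypeTagForDatatype, e.g. (illustrative user code):
/// \code
///   static const int mpi_datatype_int
///       __attribute__((type_tag_for_datatype(mpi, int)));
/// \endcode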
15238 /// 15239 /// \param TypeExpr Expression that specifies a type tag. 15240 /// 15241 /// \param MagicValues Registered magic values. 15242 /// 15243 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 15244 /// kind. 15245 /// 15246 /// \param TypeInfo Information about the corresponding C type. 15247 /// 15248 /// \param isConstantEvaluated whether the evaluation should be performed in 15249 /// a constant context. 15250 /// 15251 /// \returns true if the corresponding C type was found. 15252 static bool GetMatchingCType( 15253 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 15254 const ASTContext &Ctx, 15255 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 15256 *MagicValues, 15257 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 15258 bool isConstantEvaluated) { 15259 FoundWrongKind = false; 15260 15261 // Variable declaration that has the type_tag_for_datatype attribute. 15262 const ValueDecl *VD = nullptr; 15263 15264 uint64_t MagicValue; 15265 15266 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 15267 return false; 15268 15269 if (VD) { 15270 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 15271 if (I->getArgumentKind() != ArgumentKind) { 15272 FoundWrongKind = true; 15273 return false; 15274 } 15275 TypeInfo.Type = I->getMatchingCType(); 15276 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 15277 TypeInfo.MustBeNull = I->getMustBeNull(); 15278 return true; 15279 } 15280 return false; 15281 } 15282 15283 if (!MagicValues) 15284 return false; 15285 15286 llvm::DenseMap<Sema::TypeTagMagicValue, 15287 Sema::TypeTagData>::const_iterator I = 15288 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 15289 if (I == MagicValues->end()) 15290 return false; 15291 15292 TypeInfo = I->second; 15293 return true; 15294 } 15295 15296 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 15297 uint64_t MagicValue, QualType Type, 15298 bool LayoutCompatible, 15299 bool MustBeNull) { 15300 if (!TypeTagForDatatypeMagicValues) 15301 TypeTagForDatatypeMagicValues.reset( 15302 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 15303 15304 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 15305 (*TypeTagForDatatypeMagicValues)[Magic] = 15306 TypeTagData(Type, LayoutCompatible, MustBeNull); 15307 } 15308 15309 static bool IsSameCharType(QualType T1, QualType T2) { 15310 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 15311 if (!BT1) 15312 return false; 15313 15314 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 15315 if (!BT2) 15316 return false; 15317 15318 BuiltinType::Kind T1Kind = BT1->getKind(); 15319 BuiltinType::Kind T2Kind = BT2->getKind(); 15320 15321 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 15322 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 15323 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 15324 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 15325 } 15326 15327 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 15328 const ArrayRef<const Expr *> ExprArgs, 15329 SourceLocation CallSiteLoc) { 15330 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 15331 bool IsPointerAttr = Attr->getIsPointer(); 15332 15333 // Retrieve the argument representing the 'type_tag'.
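  // For a declaration such as (illustrative user code)
  //   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
  //       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
  // the type tag index below selects the 'datatype' argument and the argument
  // index selects 'buf'.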
15334 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 15335 if (TypeTagIdxAST >= ExprArgs.size()) { 15336 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 15337 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 15338 return; 15339 } 15340 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 15341 bool FoundWrongKind; 15342 TypeTagData TypeInfo; 15343 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 15344 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 15345 TypeInfo, isConstantEvaluated())) { 15346 if (FoundWrongKind) 15347 Diag(TypeTagExpr->getExprLoc(), 15348 diag::warn_type_tag_for_datatype_wrong_kind) 15349 << TypeTagExpr->getSourceRange(); 15350 return; 15351 } 15352 15353 // Retrieve the argument representing the 'arg_idx'. 15354 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 15355 if (ArgumentIdxAST >= ExprArgs.size()) { 15356 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 15357 << 1 << Attr->getArgumentIdx().getSourceIndex(); 15358 return; 15359 } 15360 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 15361 if (IsPointerAttr) { 15362 // Skip implicit cast of pointer to `void *' (as a function argument). 15363 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 15364 if (ICE->getType()->isVoidPointerType() && 15365 ICE->getCastKind() == CK_BitCast) 15366 ArgumentExpr = ICE->getSubExpr(); 15367 } 15368 QualType ArgumentType = ArgumentExpr->getType(); 15369 15370 // Passing a `void*' pointer shouldn't trigger a warning. 15371 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 15372 return; 15373 15374 if (TypeInfo.MustBeNull) { 15375 // Type tag with matching void type requires a null pointer. 15376 if (!ArgumentExpr->isNullPointerConstant(Context, 15377 Expr::NPC_ValueDependentIsNotNull)) { 15378 Diag(ArgumentExpr->getExprLoc(), 15379 diag::warn_type_safety_null_pointer_required) 15380 << ArgumentKind->getName() 15381 << ArgumentExpr->getSourceRange() 15382 << TypeTagExpr->getSourceRange(); 15383 } 15384 return; 15385 } 15386 15387 QualType RequiredType = TypeInfo.Type; 15388 if (IsPointerAttr) 15389 RequiredType = Context.getPointerType(RequiredType); 15390 15391 bool mismatch = false; 15392 if (!TypeInfo.LayoutCompatible) { 15393 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 15394 15395 // C++11 [basic.fundamental] p1: 15396 // Plain char, signed char, and unsigned char are three distinct types. 15397 // 15398 // But we treat plain `char' as equivalent to `signed char' or `unsigned 15399 // char' depending on the current char signedness mode. 
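    // For example (illustrative), when 'char' is signed, an argument of type
    // 'char *' is accepted where the registered type is 'signed char *'.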
15400 if (mismatch) 15401 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 15402 RequiredType->getPointeeType())) || 15403 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 15404 mismatch = false; 15405 } else 15406 if (IsPointerAttr) 15407 mismatch = !isLayoutCompatible(Context, 15408 ArgumentType->getPointeeType(), 15409 RequiredType->getPointeeType()); 15410 else 15411 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 15412 15413 if (mismatch) 15414 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 15415 << ArgumentType << ArgumentKind 15416 << TypeInfo.LayoutCompatible << RequiredType 15417 << ArgumentExpr->getSourceRange() 15418 << TypeTagExpr->getSourceRange(); 15419 } 15420 15421 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 15422 CharUnits Alignment) { 15423 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 15424 } 15425 15426 void Sema::DiagnoseMisalignedMembers() { 15427 for (MisalignedMember &m : MisalignedMembers) { 15428 const NamedDecl *ND = m.RD; 15429 if (ND->getName().empty()) { 15430 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 15431 ND = TD; 15432 } 15433 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 15434 << m.MD << ND << m.E->getSourceRange(); 15435 } 15436 MisalignedMembers.clear(); 15437 } 15438 15439 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 15440 E = E->IgnoreParens(); 15441 if (!T->isPointerType() && !T->isIntegerType()) 15442 return; 15443 if (isa<UnaryOperator>(E) && 15444 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 15445 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 15446 if (isa<MemberExpr>(Op)) { 15447 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 15448 if (MA != MisalignedMembers.end() && 15449 (T->isIntegerType() || 15450 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 15451 Context.getTypeAlignInChars( 15452 T->getPointeeType()) <= MA->Alignment)))) 15453 MisalignedMembers.erase(MA); 15454 } 15455 } 15456 } 15457 15458 void Sema::RefersToMemberWithReducedAlignment( 15459 Expr *E, 15460 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 15461 Action) { 15462 const auto *ME = dyn_cast<MemberExpr>(E); 15463 if (!ME) 15464 return; 15465 15466 // No need to check expressions with an __unaligned-qualified type. 15467 if (E->getType().getQualifiers().hasUnaligned()) 15468 return; 15469 15470 // For a chain of MemberExpr like "a.b.c.d" this list 15471 // will keep FieldDecl's like [d, c, b]. 15472 SmallVector<FieldDecl *, 4> ReverseMemberChain; 15473 const MemberExpr *TopME = nullptr; 15474 bool AnyIsPacked = false; 15475 do { 15476 QualType BaseType = ME->getBase()->getType(); 15477 if (BaseType->isDependentType()) 15478 return; 15479 if (ME->isArrow()) 15480 BaseType = BaseType->getPointeeType(); 15481 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 15482 if (RD->isInvalidDecl()) 15483 return; 15484 15485 ValueDecl *MD = ME->getMemberDecl(); 15486 auto *FD = dyn_cast<FieldDecl>(MD); 15487 // We do not care about non-data members. 
15488 if (!FD || FD->isInvalidDecl()) 15489 return; 15490 15491 AnyIsPacked = 15492 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 15493 ReverseMemberChain.push_back(FD); 15494 15495 TopME = ME; 15496 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 15497 } while (ME); 15498 assert(TopME && "We did not compute a topmost MemberExpr!"); 15499 15500 // Not in the scope of this diagnostic. 15501 if (!AnyIsPacked) 15502 return; 15503 15504 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 15505 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 15506 // TODO: The innermost base of the member expression may be too complicated. 15507 // For now, just disregard these cases. This is left for future 15508 // improvement. 15509 if (!DRE && !isa<CXXThisExpr>(TopBase)) 15510 return; 15511 15512 // Alignment expected by the whole expression. 15513 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 15514 15515 // No need to do anything else with this case. 15516 if (ExpectedAlignment.isOne()) 15517 return; 15518 15519 // Synthesize the offset of the whole access. 15520 CharUnits Offset; 15521 for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend(); 15522 I++) { 15523 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I)); 15524 } 15525 15526 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 15527 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 15528 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 15529 15530 // The base expression of the innermost MemberExpr may give 15531 // stronger guarantees than the class containing the member. 15532 if (DRE && !TopME->isArrow()) { 15533 const ValueDecl *VD = DRE->getDecl(); 15534 if (!VD->getType()->isReferenceType()) 15535 CompleteObjectAlignment = 15536 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 15537 } 15538 15539 // Check if the synthesized offset fulfills the alignment. 15540 if (Offset % ExpectedAlignment != 0 || 15541 // The offset may be properly aligned, but the effective alignment may 15542 // still be lower than the expected expression alignment. 15543 CompleteObjectAlignment < ExpectedAlignment) { 15544 // If this happens, we want to determine a sensible culprit. 15545 // Intuitively, walking the chain of member expressions from right to 15546 // left, we start with the alignment required by the field type, but 15547 // some packed attribute in that chain has reduced the alignment. 15548 // Another packed structure may increase it again, but if we are here, 15549 // that increase has not been enough. So pointing at the first FieldDecl 15550 // that is either packed itself or whose RecordDecl is packed 15551 // seems reasonable.
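    // Illustrative example: for 'struct __attribute__((packed)) S { char c; int i; };',
    // taking '&s.i' reports 'i' as the member with reduced alignment.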
15552 FieldDecl *FD = nullptr; 15553 CharUnits Alignment; 15554 for (FieldDecl *FDI : ReverseMemberChain) { 15555 if (FDI->hasAttr<PackedAttr>() || 15556 FDI->getParent()->hasAttr<PackedAttr>()) { 15557 FD = FDI; 15558 Alignment = std::min( 15559 Context.getTypeAlignInChars(FD->getType()), 15560 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 15561 break; 15562 } 15563 } 15564 assert(FD && "We did not find a packed FieldDecl!"); 15565 Action(E, FD->getParent(), FD, Alignment); 15566 } 15567 } 15568 15569 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 15570 using namespace std::placeholders; 15571 15572 RefersToMemberWithReducedAlignment( 15573 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 15574 _2, _3, _4)); 15575 } 15576 15577 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 15578 ExprResult CallResult) { 15579 if (checkArgCount(*this, TheCall, 1)) 15580 return ExprError(); 15581 15582 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 15583 if (MatrixArg.isInvalid()) 15584 return MatrixArg; 15585 Expr *Matrix = MatrixArg.get(); 15586 15587 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 15588 if (!MType) { 15589 Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg); 15590 return ExprError(); 15591 } 15592 15593 // Create returned matrix type by swapping rows and columns of the argument 15594 // matrix type. 15595 QualType ResultType = Context.getConstantMatrixType( 15596 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 15597 15598 // Change the return type to the type of the returned matrix. 15599 TheCall->setType(ResultType); 15600 15601 // Update call argument to use the possibly converted matrix argument. 15602 TheCall->setArg(0, Matrix); 15603 return CallResult; 15604 } 15605 15606 // Get and verify the matrix dimensions. 15607 static llvm::Optional<unsigned> 15608 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 15609 SourceLocation ErrorPos; 15610 Optional<llvm::APSInt> Value = 15611 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 15612 if (!Value) { 15613 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 15614 << Name; 15615 return {}; 15616 } 15617 uint64_t Dim = Value->getZExtValue(); 15618 if (!ConstantMatrixType::isDimensionValid(Dim)) { 15619 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 15620 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 15621 return {}; 15622 } 15623 return Dim; 15624 } 15625 15626 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 15627 ExprResult CallResult) { 15628 if (!getLangOpts().MatrixTypes) { 15629 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 15630 return ExprError(); 15631 } 15632 15633 if (checkArgCount(*this, TheCall, 4)) 15634 return ExprError(); 15635 15636 unsigned PtrArgIdx = 0; 15637 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 15638 Expr *RowsExpr = TheCall->getArg(1); 15639 Expr *ColumnsExpr = TheCall->getArg(2); 15640 Expr *StrideExpr = TheCall->getArg(3); 15641 15642 bool ArgError = false; 15643 15644 // Check pointer argument. 
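  // For example (illustrative), a first argument of type 'float *' is accepted
  // below, while a non-pointer argument or a pointer to a type that is not a
  // valid matrix element type is diagnosed with err_builtin_matrix_pointer_arg.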
15645 { 15646 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 15647 if (PtrConv.isInvalid()) 15648 return PtrConv; 15649 PtrExpr = PtrConv.get(); 15650 TheCall->setArg(0, PtrExpr); 15651 if (PtrExpr->isTypeDependent()) { 15652 TheCall->setType(Context.DependentTy); 15653 return TheCall; 15654 } 15655 } 15656 15657 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 15658 QualType ElementTy; 15659 if (!PtrTy) { 15660 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 15661 << PtrArgIdx + 1; 15662 ArgError = true; 15663 } else { 15664 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 15665 15666 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 15667 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 15668 << PtrArgIdx + 1; 15669 ArgError = true; 15670 } 15671 } 15672 15673 // Apply default Lvalue conversions and convert the expression to size_t. 15674 auto ApplyArgumentConversions = [this](Expr *E) { 15675 ExprResult Conv = DefaultLvalueConversion(E); 15676 if (Conv.isInvalid()) 15677 return Conv; 15678 15679 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 15680 }; 15681 15682 // Apply conversion to row and column expressions. 15683 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 15684 if (!RowsConv.isInvalid()) { 15685 RowsExpr = RowsConv.get(); 15686 TheCall->setArg(1, RowsExpr); 15687 } else 15688 RowsExpr = nullptr; 15689 15690 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 15691 if (!ColumnsConv.isInvalid()) { 15692 ColumnsExpr = ColumnsConv.get(); 15693 TheCall->setArg(2, ColumnsExpr); 15694 } else 15695 ColumnsExpr = nullptr; 15696 15697 // If any part of the result matrix type is still pending, just use 15698 // Context.DependentTy until all parts are resolved. 15699 if ((RowsExpr && RowsExpr->isTypeDependent()) || 15700 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 15701 TheCall->setType(Context.DependentTy); 15702 return CallResult; 15703 } 15704 15705 // Check row and column dimensions. 15706 llvm::Optional<unsigned> MaybeRows; 15707 if (RowsExpr) 15708 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 15709 15710 llvm::Optional<unsigned> MaybeColumns; 15711 if (ColumnsExpr) 15712 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 15713 15714 // Check stride argument.
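  // For example (illustrative), __builtin_matrix_column_major_load(Ptr, 3, 2, 2)
  // is rejected below because the constant stride (2) is smaller than the row
  // count (3).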
15715 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 15716 if (StrideConv.isInvalid()) 15717 return ExprError(); 15718 StrideExpr = StrideConv.get(); 15719 TheCall->setArg(3, StrideExpr); 15720 15721 if (MaybeRows) { 15722 if (Optional<llvm::APSInt> Value = 15723 StrideExpr->getIntegerConstantExpr(Context)) { 15724 uint64_t Stride = Value->getZExtValue(); 15725 if (Stride < *MaybeRows) { 15726 Diag(StrideExpr->getBeginLoc(), 15727 diag::err_builtin_matrix_stride_too_small); 15728 ArgError = true; 15729 } 15730 } 15731 } 15732 15733 if (ArgError || !MaybeRows || !MaybeColumns) 15734 return ExprError(); 15735 15736 TheCall->setType( 15737 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 15738 return CallResult; 15739 } 15740 15741 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 15742 ExprResult CallResult) { 15743 if (checkArgCount(*this, TheCall, 3)) 15744 return ExprError(); 15745 15746 unsigned PtrArgIdx = 1; 15747 Expr *MatrixExpr = TheCall->getArg(0); 15748 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 15749 Expr *StrideExpr = TheCall->getArg(2); 15750 15751 bool ArgError = false; 15752 15753 { 15754 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 15755 if (MatrixConv.isInvalid()) 15756 return MatrixConv; 15757 MatrixExpr = MatrixConv.get(); 15758 TheCall->setArg(0, MatrixExpr); 15759 } 15760 if (MatrixExpr->isTypeDependent()) { 15761 TheCall->setType(Context.DependentTy); 15762 return TheCall; 15763 } 15764 15765 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 15766 if (!MatrixTy) { 15767 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0; 15768 ArgError = true; 15769 } 15770 15771 { 15772 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 15773 if (PtrConv.isInvalid()) 15774 return PtrConv; 15775 PtrExpr = PtrConv.get(); 15776 TheCall->setArg(1, PtrExpr); 15777 if (PtrExpr->isTypeDependent()) { 15778 TheCall->setType(Context.DependentTy); 15779 return TheCall; 15780 } 15781 } 15782 15783 // Check pointer argument. 15784 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 15785 if (!PtrTy) { 15786 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 15787 << PtrArgIdx + 1; 15788 ArgError = true; 15789 } else { 15790 QualType ElementTy = PtrTy->getPointeeType(); 15791 if (ElementTy.isConstQualified()) { 15792 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 15793 ArgError = true; 15794 } 15795 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 15796 if (MatrixTy && 15797 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 15798 Diag(PtrExpr->getBeginLoc(), 15799 diag::err_builtin_matrix_pointer_arg_mismatch) 15800 << ElementTy << MatrixTy->getElementType(); 15801 ArgError = true; 15802 } 15803 } 15804 15805 // Apply default Lvalue conversions and convert the stride expression to 15806 // size_t. 15807 { 15808 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 15809 if (StrideConv.isInvalid()) 15810 return StrideConv; 15811 15812 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 15813 if (StrideConv.isInvalid()) 15814 return StrideConv; 15815 StrideExpr = StrideConv.get(); 15816 TheCall->setArg(2, StrideExpr); 15817 } 15818 15819 // Check stride argument. 
15820 if (MatrixTy) { 15821 if (Optional<llvm::APSInt> Value = 15822 StrideExpr->getIntegerConstantExpr(Context)) { 15823 uint64_t Stride = Value->getZExtValue(); 15824 if (Stride < MatrixTy->getNumRows()) { 15825 Diag(StrideExpr->getBeginLoc(), 15826 diag::err_builtin_matrix_stride_too_small); 15827 ArgError = true; 15828 } 15829 } 15830 } 15831 15832 if (ArgError) 15833 return ExprError(); 15834 15835 return CallResult; 15836 } 15837