//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
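  // Illustrative usage (not from the original source): __builtin_annotation(i,
  // "tag") with an integer 'i' passes both checks, while a wide string literal
  // such as L"tag" is rejected by the check below.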
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
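  // Illustrative usage (not from the original source): with 'void *p',
  // __builtin_align_up(p, 16) passes the checks below, an alignment of 7 is
  // rejected as not a power of two, 0 is rejected as too small, and an
  // alignment of 1 only triggers the "useless" warning.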
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << MaxValue.toString(10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to mul function until
  // we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
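    // Illustrative (not from the original source): "%5d" contributes
    // std::max(FieldWidth = 5, Precision = 1) == 5 bytes to the lower bound.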
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
/// __builtin_*_chk function, then use the object size argument specified in
/// the source. Otherwise, infer the object size using __builtin_object_size.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // FIXME: There are some more useful checks we could be doing here:
  //  - Evaluate strlen of strcpy arguments, use as object size.

  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  unsigned DiagID = 0;
  bool IsChkVariant = false;
  Optional<llvm::APSInt> UsedSize;
  unsigned SizeIndex, ObjectIndex;
  switch (BuiltinID) {
  default:
    return;
  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                       .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          IsChkVariant = true;
          ObjectIndex = 2;
        } else {
          IsChkVariant = false;
          ObjectIndex = 0;
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = TheCall->getNumArgs() - 2;
    ObjectIndex = TheCall->getNumArgs() - 1;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    IsChkVariant = true;
    SizeIndex = 1;
    ObjectIndex = 3;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
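    // Illustrative (not from the original source): with 'char buf[8]',
    // strncpy(buf, src, 9) passes a size larger than the destination object
    // and is flagged by the size-mismatch diagnostic selected here.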
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SizeIndex = TheCall->getNumArgs() - 1;
    ObjectIndex = 0;
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SizeIndex = 1;
    ObjectIndex = 0;
    break;
  }
  }

  llvm::APSInt ObjectSize;
  // For __builtin___*_chk, the object size is explicitly provided by the
  // caller (usually using __builtin_object_size). Use that value to check
  // this call.
  if (IsChkVariant) {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(ObjectIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    ObjectSize = Result.Val.getInt();

    // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
  } else {
    // If the parameter has a pass_object_size attribute, then we should use
    // its (potentially) more strict checking mode. Otherwise, conservatively
    // assume type 0.
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    Expr *ObjArg = TheCall->getArg(ObjectIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return;
    // Get the object size in the target's size_t width.
    ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  }

  // Evaluate the number of bytes of the object that this call will use.
  if (!UsedSize) {
    Expr::EvalResult Result;
    Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
    if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
      return;
    UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
  }

  if (UsedSize.getValue().ule(ObjectSize))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << ObjectSize.toString(/*Radix=*/10)
                          << UsedSize.getValue().toString(/*Radix=*/10));
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through
  // template instantiation. Therefore checking once during the parse is
  // sufficient.
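  // Illustrative (not from the original source): if such a builtin is called
  // outside any enclosing SEH __except scope, the loop below runs off the top
  // of the scope chain and the diagnostic passed in DiagID is emitted.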
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
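/// Illustrative (not from the original source): the argument must be a block
/// whose parameters, if any, are all 'local void *', e.g.
///   get_kernel_work_group_size(^(local void *p){ ... });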
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
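  // Illustrative (not from the original source): the simplest accepted form is
  //   enqueue_kernel(queue, flags, ndrange, ^(void){ ... });
  // matching the first overload documented above.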
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give the generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier is incompatible with the read/write builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if the pipe element type is different from the pointee type
/// of the packet pointer argument.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
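  // Illustrative (not from the original source): for 'pipe int p' and
  // 'int *ptr', read_pipe(p, ptr) passes this check, while passing a
  // 'float *' packet pointer is diagnosed below.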
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() ||
      RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
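  // Illustrative (not from the original source): when targeting x86, these
  // _acq/_rel/_nf interlocked bittest variants fall outside the supported
  // architecture list below and are rejected.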
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
1539 case Builtin::BI__builtin_prefetch: 1540 if (SemaBuiltinPrefetch(TheCall)) 1541 return ExprError(); 1542 break; 1543 case Builtin::BI__builtin_alloca_with_align: 1544 if (SemaBuiltinAllocaWithAlign(TheCall)) 1545 return ExprError(); 1546 LLVM_FALLTHROUGH; 1547 case Builtin::BI__builtin_alloca: 1548 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1549 << TheCall->getDirectCallee(); 1550 break; 1551 case Builtin::BI__assume: 1552 case Builtin::BI__builtin_assume: 1553 if (SemaBuiltinAssume(TheCall)) 1554 return ExprError(); 1555 break; 1556 case Builtin::BI__builtin_assume_aligned: 1557 if (SemaBuiltinAssumeAligned(TheCall)) 1558 return ExprError(); 1559 break; 1560 case Builtin::BI__builtin_dynamic_object_size: 1561 case Builtin::BI__builtin_object_size: 1562 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1563 return ExprError(); 1564 break; 1565 case Builtin::BI__builtin_longjmp: 1566 if (SemaBuiltinLongjmp(TheCall)) 1567 return ExprError(); 1568 break; 1569 case Builtin::BI__builtin_setjmp: 1570 if (SemaBuiltinSetjmp(TheCall)) 1571 return ExprError(); 1572 break; 1573 case Builtin::BI__builtin_classify_type: 1574 if (checkArgCount(*this, TheCall, 1)) return true; 1575 TheCall->setType(Context.IntTy); 1576 break; 1577 case Builtin::BI__builtin_complex: 1578 if (SemaBuiltinComplex(TheCall)) 1579 return ExprError(); 1580 break; 1581 case Builtin::BI__builtin_constant_p: { 1582 if (checkArgCount(*this, TheCall, 1)) return true; 1583 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1584 if (Arg.isInvalid()) return true; 1585 TheCall->setArg(0, Arg.get()); 1586 TheCall->setType(Context.IntTy); 1587 break; 1588 } 1589 case Builtin::BI__builtin_launder: 1590 return SemaBuiltinLaunder(*this, TheCall); 1591 case Builtin::BI__sync_fetch_and_add: 1592 case Builtin::BI__sync_fetch_and_add_1: 1593 case Builtin::BI__sync_fetch_and_add_2: 1594 case Builtin::BI__sync_fetch_and_add_4: 1595 case Builtin::BI__sync_fetch_and_add_8: 1596 case Builtin::BI__sync_fetch_and_add_16: 1597 case Builtin::BI__sync_fetch_and_sub: 1598 case Builtin::BI__sync_fetch_and_sub_1: 1599 case Builtin::BI__sync_fetch_and_sub_2: 1600 case Builtin::BI__sync_fetch_and_sub_4: 1601 case Builtin::BI__sync_fetch_and_sub_8: 1602 case Builtin::BI__sync_fetch_and_sub_16: 1603 case Builtin::BI__sync_fetch_and_or: 1604 case Builtin::BI__sync_fetch_and_or_1: 1605 case Builtin::BI__sync_fetch_and_or_2: 1606 case Builtin::BI__sync_fetch_and_or_4: 1607 case Builtin::BI__sync_fetch_and_or_8: 1608 case Builtin::BI__sync_fetch_and_or_16: 1609 case Builtin::BI__sync_fetch_and_and: 1610 case Builtin::BI__sync_fetch_and_and_1: 1611 case Builtin::BI__sync_fetch_and_and_2: 1612 case Builtin::BI__sync_fetch_and_and_4: 1613 case Builtin::BI__sync_fetch_and_and_8: 1614 case Builtin::BI__sync_fetch_and_and_16: 1615 case Builtin::BI__sync_fetch_and_xor: 1616 case Builtin::BI__sync_fetch_and_xor_1: 1617 case Builtin::BI__sync_fetch_and_xor_2: 1618 case Builtin::BI__sync_fetch_and_xor_4: 1619 case Builtin::BI__sync_fetch_and_xor_8: 1620 case Builtin::BI__sync_fetch_and_xor_16: 1621 case Builtin::BI__sync_fetch_and_nand: 1622 case Builtin::BI__sync_fetch_and_nand_1: 1623 case Builtin::BI__sync_fetch_and_nand_2: 1624 case Builtin::BI__sync_fetch_and_nand_4: 1625 case Builtin::BI__sync_fetch_and_nand_8: 1626 case Builtin::BI__sync_fetch_and_nand_16: 1627 case Builtin::BI__sync_add_and_fetch: 1628 case Builtin::BI__sync_add_and_fetch_1: 1629 case Builtin::BI__sync_add_and_fetch_2: 1630 case Builtin::BI__sync_add_and_fetch_4: 1631 
case Builtin::BI__sync_add_and_fetch_8: 1632 case Builtin::BI__sync_add_and_fetch_16: 1633 case Builtin::BI__sync_sub_and_fetch: 1634 case Builtin::BI__sync_sub_and_fetch_1: 1635 case Builtin::BI__sync_sub_and_fetch_2: 1636 case Builtin::BI__sync_sub_and_fetch_4: 1637 case Builtin::BI__sync_sub_and_fetch_8: 1638 case Builtin::BI__sync_sub_and_fetch_16: 1639 case Builtin::BI__sync_and_and_fetch: 1640 case Builtin::BI__sync_and_and_fetch_1: 1641 case Builtin::BI__sync_and_and_fetch_2: 1642 case Builtin::BI__sync_and_and_fetch_4: 1643 case Builtin::BI__sync_and_and_fetch_8: 1644 case Builtin::BI__sync_and_and_fetch_16: 1645 case Builtin::BI__sync_or_and_fetch: 1646 case Builtin::BI__sync_or_and_fetch_1: 1647 case Builtin::BI__sync_or_and_fetch_2: 1648 case Builtin::BI__sync_or_and_fetch_4: 1649 case Builtin::BI__sync_or_and_fetch_8: 1650 case Builtin::BI__sync_or_and_fetch_16: 1651 case Builtin::BI__sync_xor_and_fetch: 1652 case Builtin::BI__sync_xor_and_fetch_1: 1653 case Builtin::BI__sync_xor_and_fetch_2: 1654 case Builtin::BI__sync_xor_and_fetch_4: 1655 case Builtin::BI__sync_xor_and_fetch_8: 1656 case Builtin::BI__sync_xor_and_fetch_16: 1657 case Builtin::BI__sync_nand_and_fetch: 1658 case Builtin::BI__sync_nand_and_fetch_1: 1659 case Builtin::BI__sync_nand_and_fetch_2: 1660 case Builtin::BI__sync_nand_and_fetch_4: 1661 case Builtin::BI__sync_nand_and_fetch_8: 1662 case Builtin::BI__sync_nand_and_fetch_16: 1663 case Builtin::BI__sync_val_compare_and_swap: 1664 case Builtin::BI__sync_val_compare_and_swap_1: 1665 case Builtin::BI__sync_val_compare_and_swap_2: 1666 case Builtin::BI__sync_val_compare_and_swap_4: 1667 case Builtin::BI__sync_val_compare_and_swap_8: 1668 case Builtin::BI__sync_val_compare_and_swap_16: 1669 case Builtin::BI__sync_bool_compare_and_swap: 1670 case Builtin::BI__sync_bool_compare_and_swap_1: 1671 case Builtin::BI__sync_bool_compare_and_swap_2: 1672 case Builtin::BI__sync_bool_compare_and_swap_4: 1673 case Builtin::BI__sync_bool_compare_and_swap_8: 1674 case Builtin::BI__sync_bool_compare_and_swap_16: 1675 case Builtin::BI__sync_lock_test_and_set: 1676 case Builtin::BI__sync_lock_test_and_set_1: 1677 case Builtin::BI__sync_lock_test_and_set_2: 1678 case Builtin::BI__sync_lock_test_and_set_4: 1679 case Builtin::BI__sync_lock_test_and_set_8: 1680 case Builtin::BI__sync_lock_test_and_set_16: 1681 case Builtin::BI__sync_lock_release: 1682 case Builtin::BI__sync_lock_release_1: 1683 case Builtin::BI__sync_lock_release_2: 1684 case Builtin::BI__sync_lock_release_4: 1685 case Builtin::BI__sync_lock_release_8: 1686 case Builtin::BI__sync_lock_release_16: 1687 case Builtin::BI__sync_swap: 1688 case Builtin::BI__sync_swap_1: 1689 case Builtin::BI__sync_swap_2: 1690 case Builtin::BI__sync_swap_4: 1691 case Builtin::BI__sync_swap_8: 1692 case Builtin::BI__sync_swap_16: 1693 return SemaBuiltinAtomicOverloaded(TheCallResult); 1694 case Builtin::BI__sync_synchronize: 1695 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1696 << TheCall->getCallee()->getSourceRange(); 1697 break; 1698 case Builtin::BI__builtin_nontemporal_load: 1699 case Builtin::BI__builtin_nontemporal_store: 1700 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1701 case Builtin::BI__builtin_memcpy_inline: { 1702 clang::Expr *SizeOp = TheCall->getArg(2); 1703 // We warn about copying to or from `nullptr` pointers when `size` is 1704 // greater than 0. When `size` is value dependent we cannot evaluate its 1705 // value so we bail out. 
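  // Illustrative sketch (not from the original source), assuming 'dst' and
  // 'src' are valid buffers, of calls that would and would not be flagged by
  // the non-null checks below:
  //   __builtin_memcpy_inline(dst, nullptr, 4);  // flagged: null operand, size > 0
  //   __builtin_memcpy_inline(dst, src, 0);      // not flagged: nothing is copied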
1706 if (SizeOp->isValueDependent()) 1707 break; 1708 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1709 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1710 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1711 } 1712 break; 1713 } 1714 #define BUILTIN(ID, TYPE, ATTRS) 1715 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1716 case Builtin::BI##ID: \ 1717 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1718 #include "clang/Basic/Builtins.def" 1719 case Builtin::BI__annotation: 1720 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1721 return ExprError(); 1722 break; 1723 case Builtin::BI__builtin_annotation: 1724 if (SemaBuiltinAnnotation(*this, TheCall)) 1725 return ExprError(); 1726 break; 1727 case Builtin::BI__builtin_addressof: 1728 if (SemaBuiltinAddressof(*this, TheCall)) 1729 return ExprError(); 1730 break; 1731 case Builtin::BI__builtin_is_aligned: 1732 case Builtin::BI__builtin_align_up: 1733 case Builtin::BI__builtin_align_down: 1734 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1735 return ExprError(); 1736 break; 1737 case Builtin::BI__builtin_add_overflow: 1738 case Builtin::BI__builtin_sub_overflow: 1739 case Builtin::BI__builtin_mul_overflow: 1740 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1741 return ExprError(); 1742 break; 1743 case Builtin::BI__builtin_operator_new: 1744 case Builtin::BI__builtin_operator_delete: { 1745 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1746 ExprResult Res = 1747 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1748 if (Res.isInvalid()) 1749 CorrectDelayedTyposInExpr(TheCallResult.get()); 1750 return Res; 1751 } 1752 case Builtin::BI__builtin_dump_struct: { 1753 // We first want to ensure we are called with 2 arguments 1754 if (checkArgCount(*this, TheCall, 2)) 1755 return ExprError(); 1756 // Ensure that the first argument is of type 'struct XX *' 1757 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1758 const QualType PtrArgType = PtrArg->getType(); 1759 if (!PtrArgType->isPointerType() || 1760 !PtrArgType->getPointeeType()->isRecordType()) { 1761 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1762 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1763 << "structure pointer"; 1764 return ExprError(); 1765 } 1766 1767 // Ensure that the second argument is of type 'FunctionType' 1768 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1769 const QualType FnPtrArgType = FnPtrArg->getType(); 1770 if (!FnPtrArgType->isPointerType()) { 1771 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1772 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1773 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1774 return ExprError(); 1775 } 1776 1777 const auto *FuncType = 1778 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1779 1780 if (!FuncType) { 1781 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1782 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1783 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1784 return ExprError(); 1785 } 1786 1787 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1788 if (!FT->getNumParams()) { 1789 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1790 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1791 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1792 return ExprError(); 
1793 } 1794 QualType PT = FT->getParamType(0); 1795 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1796 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1797 !PT->getPointeeType().isConstQualified()) { 1798 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1799 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1800 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1801 return ExprError(); 1802 } 1803 } 1804 1805 TheCall->setType(Context.IntTy); 1806 break; 1807 } 1808 case Builtin::BI__builtin_expect_with_probability: { 1809 // We first want to ensure we are called with 3 arguments 1810 if (checkArgCount(*this, TheCall, 3)) 1811 return ExprError(); 1812 // then check probability is constant float in range [0.0, 1.0] 1813 const Expr *ProbArg = TheCall->getArg(2); 1814 SmallVector<PartialDiagnosticAt, 8> Notes; 1815 Expr::EvalResult Eval; 1816 Eval.Diag = &Notes; 1817 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Expr::EvaluateForCodeGen, 1818 Context)) || 1819 !Eval.Val.isFloat()) { 1820 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1821 << ProbArg->getSourceRange(); 1822 for (const PartialDiagnosticAt &PDiag : Notes) 1823 Diag(PDiag.first, PDiag.second); 1824 return ExprError(); 1825 } 1826 llvm::APFloat Probability = Eval.Val.getFloat(); 1827 bool LoseInfo = false; 1828 Probability.convert(llvm::APFloat::IEEEdouble(), 1829 llvm::RoundingMode::Dynamic, &LoseInfo); 1830 if (!(Probability >= llvm::APFloat(0.0) && 1831 Probability <= llvm::APFloat(1.0))) { 1832 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1833 << ProbArg->getSourceRange(); 1834 return ExprError(); 1835 } 1836 break; 1837 } 1838 case Builtin::BI__builtin_preserve_access_index: 1839 if (SemaBuiltinPreserveAI(*this, TheCall)) 1840 return ExprError(); 1841 break; 1842 case Builtin::BI__builtin_call_with_static_chain: 1843 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1844 return ExprError(); 1845 break; 1846 case Builtin::BI__exception_code: 1847 case Builtin::BI_exception_code: 1848 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1849 diag::err_seh___except_block)) 1850 return ExprError(); 1851 break; 1852 case Builtin::BI__exception_info: 1853 case Builtin::BI_exception_info: 1854 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1855 diag::err_seh___except_filter)) 1856 return ExprError(); 1857 break; 1858 case Builtin::BI__GetExceptionInfo: 1859 if (checkArgCount(*this, TheCall, 1)) 1860 return ExprError(); 1861 1862 if (CheckCXXThrowOperand( 1863 TheCall->getBeginLoc(), 1864 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1865 TheCall)) 1866 return ExprError(); 1867 1868 TheCall->setType(Context.VoidPtrTy); 1869 break; 1870 // OpenCL v2.0, s6.13.16 - Pipe functions 1871 case Builtin::BIread_pipe: 1872 case Builtin::BIwrite_pipe: 1873 // Since those two functions are declared with var args, we need a semantic 1874 // check for the argument. 
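  // For reference (illustrative, following the OpenCL C forms cited above),
  // the calls being validated here look roughly like:
  //   read_pipe(p, &val);              // 2-argument form
  //   read_pipe(p, rid, index, &val);  // 4-argument reservation form
  // SemaBuiltinRWPipe checks the argument count and the pipe/packet types for
  // both read_pipe and write_pipe.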
1875 if (SemaBuiltinRWPipe(*this, TheCall)) 1876 return ExprError(); 1877 break; 1878 case Builtin::BIreserve_read_pipe: 1879 case Builtin::BIreserve_write_pipe: 1880 case Builtin::BIwork_group_reserve_read_pipe: 1881 case Builtin::BIwork_group_reserve_write_pipe: 1882 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1883 return ExprError(); 1884 break; 1885 case Builtin::BIsub_group_reserve_read_pipe: 1886 case Builtin::BIsub_group_reserve_write_pipe: 1887 if (checkOpenCLSubgroupExt(*this, TheCall) || 1888 SemaBuiltinReserveRWPipe(*this, TheCall)) 1889 return ExprError(); 1890 break; 1891 case Builtin::BIcommit_read_pipe: 1892 case Builtin::BIcommit_write_pipe: 1893 case Builtin::BIwork_group_commit_read_pipe: 1894 case Builtin::BIwork_group_commit_write_pipe: 1895 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1896 return ExprError(); 1897 break; 1898 case Builtin::BIsub_group_commit_read_pipe: 1899 case Builtin::BIsub_group_commit_write_pipe: 1900 if (checkOpenCLSubgroupExt(*this, TheCall) || 1901 SemaBuiltinCommitRWPipe(*this, TheCall)) 1902 return ExprError(); 1903 break; 1904 case Builtin::BIget_pipe_num_packets: 1905 case Builtin::BIget_pipe_max_packets: 1906 if (SemaBuiltinPipePackets(*this, TheCall)) 1907 return ExprError(); 1908 break; 1909 case Builtin::BIto_global: 1910 case Builtin::BIto_local: 1911 case Builtin::BIto_private: 1912 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1913 return ExprError(); 1914 break; 1915 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 1916 case Builtin::BIenqueue_kernel: 1917 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1918 return ExprError(); 1919 break; 1920 case Builtin::BIget_kernel_work_group_size: 1921 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1922 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1923 return ExprError(); 1924 break; 1925 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1926 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1927 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1928 return ExprError(); 1929 break; 1930 case Builtin::BI__builtin_os_log_format: 1931 Cleanup.setExprNeedsCleanups(true); 1932 LLVM_FALLTHROUGH; 1933 case Builtin::BI__builtin_os_log_format_buffer_size: 1934 if (SemaBuiltinOSLogFormat(TheCall)) 1935 return ExprError(); 1936 break; 1937 case Builtin::BI__builtin_frame_address: 1938 case Builtin::BI__builtin_return_address: { 1939 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1940 return ExprError(); 1941 1942 // -Wframe-address warning if non-zero passed to builtin 1943 // return/frame address. 1944 Expr::EvalResult Result; 1945 if (TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1946 Result.Val.getInt() != 0) 1947 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1948 << ((BuiltinID == Builtin::BI__builtin_return_address) 1949 ? "__builtin_return_address" 1950 : "__builtin_frame_address") 1951 << TheCall->getSourceRange(); 1952 break; 1953 } 1954 1955 case Builtin::BI__builtin_matrix_transpose: 1956 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1957 1958 case Builtin::BI__builtin_matrix_column_major_load: 1959 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1960 1961 case Builtin::BI__builtin_matrix_column_major_store: 1962 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1963 } 1964 1965 // Since the target specific builtins for each arch overlap, only check those 1966 // of the arch we are compiling for. 
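  // Note: when an aux target is present (e.g. CUDA or OpenMP device
  // compilations), builtins owned by the aux target are validated below
  // against the aux TargetInfo rather than the primary target.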
1967 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 1968 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 1969 assert(Context.getAuxTargetInfo() && 1970 "Aux Target Builtin, but not an aux target?"); 1971 1972 if (CheckTSBuiltinFunctionCall( 1973 *Context.getAuxTargetInfo(), 1974 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 1975 return ExprError(); 1976 } else { 1977 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 1978 TheCall)) 1979 return ExprError(); 1980 } 1981 } 1982 1983 return TheCallResult; 1984 } 1985 1986 // Get the valid immediate range for the specified NEON type code. 1987 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 1988 NeonTypeFlags Type(t); 1989 int IsQuad = ForceQuad ? true : Type.isQuad(); 1990 switch (Type.getEltType()) { 1991 case NeonTypeFlags::Int8: 1992 case NeonTypeFlags::Poly8: 1993 return shift ? 7 : (8 << IsQuad) - 1; 1994 case NeonTypeFlags::Int16: 1995 case NeonTypeFlags::Poly16: 1996 return shift ? 15 : (4 << IsQuad) - 1; 1997 case NeonTypeFlags::Int32: 1998 return shift ? 31 : (2 << IsQuad) - 1; 1999 case NeonTypeFlags::Int64: 2000 case NeonTypeFlags::Poly64: 2001 return shift ? 63 : (1 << IsQuad) - 1; 2002 case NeonTypeFlags::Poly128: 2003 return shift ? 127 : (1 << IsQuad) - 1; 2004 case NeonTypeFlags::Float16: 2005 assert(!shift && "cannot shift float types!"); 2006 return (4 << IsQuad) - 1; 2007 case NeonTypeFlags::Float32: 2008 assert(!shift && "cannot shift float types!"); 2009 return (2 << IsQuad) - 1; 2010 case NeonTypeFlags::Float64: 2011 assert(!shift && "cannot shift float types!"); 2012 return (1 << IsQuad) - 1; 2013 case NeonTypeFlags::BFloat16: 2014 assert(!shift && "cannot shift float types!"); 2015 return (4 << IsQuad) - 1; 2016 } 2017 llvm_unreachable("Invalid NeonTypeFlag!"); 2018 } 2019 2020 /// getNeonEltType - Return the QualType corresponding to the elements of 2021 /// the vector type specified by the NeonTypeFlags. This is used to check 2022 /// the pointer arguments for Neon load/store intrinsics. 2023 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2024 bool IsPolyUnsigned, bool IsInt64Long) { 2025 switch (Flags.getEltType()) { 2026 case NeonTypeFlags::Int8: 2027 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2028 case NeonTypeFlags::Int16: 2029 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2030 case NeonTypeFlags::Int32: 2031 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2032 case NeonTypeFlags::Int64: 2033 if (IsInt64Long) 2034 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2035 else 2036 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2037 : Context.LongLongTy; 2038 case NeonTypeFlags::Poly8: 2039 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2040 case NeonTypeFlags::Poly16: 2041 return IsPolyUnsigned ? 
Context.UnsignedShortTy : Context.ShortTy; 2042 case NeonTypeFlags::Poly64: 2043 if (IsInt64Long) 2044 return Context.UnsignedLongTy; 2045 else 2046 return Context.UnsignedLongLongTy; 2047 case NeonTypeFlags::Poly128: 2048 break; 2049 case NeonTypeFlags::Float16: 2050 return Context.HalfTy; 2051 case NeonTypeFlags::Float32: 2052 return Context.FloatTy; 2053 case NeonTypeFlags::Float64: 2054 return Context.DoubleTy; 2055 case NeonTypeFlags::BFloat16: 2056 return Context.BFloat16Ty; 2057 } 2058 llvm_unreachable("Invalid NeonTypeFlag!"); 2059 } 2060 2061 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2062 // Range check SVE intrinsics that take immediate values. 2063 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2064 2065 switch (BuiltinID) { 2066 default: 2067 return false; 2068 #define GET_SVE_IMMEDIATE_CHECK 2069 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2070 #undef GET_SVE_IMMEDIATE_CHECK 2071 } 2072 2073 // Perform all the immediate checks for this builtin call. 2074 bool HasError = false; 2075 for (auto &I : ImmChecks) { 2076 int ArgNum, CheckTy, ElementSizeInBits; 2077 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2078 2079 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2080 2081 // Function that checks whether the operand (ArgNum) is an immediate 2082 // that is one of the predefined values. 2083 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2084 int ErrDiag) -> bool { 2085 // We can't check the value of a dependent argument. 2086 Expr *Arg = TheCall->getArg(ArgNum); 2087 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2088 return false; 2089 2090 // Check constant-ness first. 2091 llvm::APSInt Imm; 2092 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2093 return true; 2094 2095 if (!CheckImm(Imm.getSExtValue())) 2096 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2097 return false; 2098 }; 2099 2100 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2101 case SVETypeFlags::ImmCheck0_31: 2102 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2103 HasError = true; 2104 break; 2105 case SVETypeFlags::ImmCheck0_13: 2106 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2107 HasError = true; 2108 break; 2109 case SVETypeFlags::ImmCheck1_16: 2110 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2111 HasError = true; 2112 break; 2113 case SVETypeFlags::ImmCheck0_7: 2114 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2115 HasError = true; 2116 break; 2117 case SVETypeFlags::ImmCheckExtract: 2118 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2119 (2048 / ElementSizeInBits) - 1)) 2120 HasError = true; 2121 break; 2122 case SVETypeFlags::ImmCheckShiftRight: 2123 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2124 HasError = true; 2125 break; 2126 case SVETypeFlags::ImmCheckShiftRightNarrow: 2127 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2128 ElementSizeInBits / 2)) 2129 HasError = true; 2130 break; 2131 case SVETypeFlags::ImmCheckShiftLeft: 2132 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2133 ElementSizeInBits - 1)) 2134 HasError = true; 2135 break; 2136 case SVETypeFlags::ImmCheckLaneIndex: 2137 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2138 (128 / (1 * ElementSizeInBits)) - 1)) 2139 HasError = true; 2140 break; 2141 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2142 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2143 (128 / (2 * ElementSizeInBits)) - 1)) 2144 HasError = true; 2145 break; 2146 case 
SVETypeFlags::ImmCheckLaneIndexDot: 2147 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2148 (128 / (4 * ElementSizeInBits)) - 1)) 2149 HasError = true; 2150 break; 2151 case SVETypeFlags::ImmCheckComplexRot90_270: 2152 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2153 diag::err_rotation_argument_to_cadd)) 2154 HasError = true; 2155 break; 2156 case SVETypeFlags::ImmCheckComplexRotAll90: 2157 if (CheckImmediateInSet( 2158 [](int64_t V) { 2159 return V == 0 || V == 90 || V == 180 || V == 270; 2160 }, 2161 diag::err_rotation_argument_to_cmla)) 2162 HasError = true; 2163 break; 2164 case SVETypeFlags::ImmCheck0_1: 2165 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2166 HasError = true; 2167 break; 2168 case SVETypeFlags::ImmCheck0_2: 2169 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2170 HasError = true; 2171 break; 2172 case SVETypeFlags::ImmCheck0_3: 2173 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2174 HasError = true; 2175 break; 2176 } 2177 } 2178 2179 return HasError; 2180 } 2181 2182 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2183 unsigned BuiltinID, CallExpr *TheCall) { 2184 llvm::APSInt Result; 2185 uint64_t mask = 0; 2186 unsigned TV = 0; 2187 int PtrArgNum = -1; 2188 bool HasConstPtr = false; 2189 switch (BuiltinID) { 2190 #define GET_NEON_OVERLOAD_CHECK 2191 #include "clang/Basic/arm_neon.inc" 2192 #include "clang/Basic/arm_fp16.inc" 2193 #undef GET_NEON_OVERLOAD_CHECK 2194 } 2195 2196 // For NEON intrinsics which are overloaded on vector element type, validate 2197 // the immediate which specifies which variant to emit. 2198 unsigned ImmArg = TheCall->getNumArgs()-1; 2199 if (mask) { 2200 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2201 return true; 2202 2203 TV = Result.getLimitedValue(64); 2204 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2205 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2206 << TheCall->getArg(ImmArg)->getSourceRange(); 2207 } 2208 2209 if (PtrArgNum >= 0) { 2210 // Check that pointer arguments have the specified type. 2211 Expr *Arg = TheCall->getArg(PtrArgNum); 2212 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2213 Arg = ICE->getSubExpr(); 2214 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2215 QualType RHSTy = RHS.get()->getType(); 2216 2217 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2218 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2219 Arch == llvm::Triple::aarch64_32 || 2220 Arch == llvm::Triple::aarch64_be; 2221 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2222 QualType EltTy = 2223 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2224 if (HasConstPtr) 2225 EltTy = EltTy.withConst(); 2226 QualType LHSTy = Context.getPointerType(EltTy); 2227 AssignConvertType ConvTy; 2228 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2229 if (RHS.isInvalid()) 2230 return true; 2231 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2232 RHS.get(), AA_Assigning)) 2233 return true; 2234 } 2235 2236 // For NEON intrinsics which take an immediate value as part of the 2237 // instruction, range check them here. 
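  // The generated tables below supply the argument index (i) and bounds
  // (l, u); out-of-range immediates are rejected by
  // SemaBuiltinConstantArgRange. For example (illustrative), a NEON
  // right-shift immediate must lie in [1, element-size-in-bits].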
2238 unsigned i = 0, l = 0, u = 0; 2239 switch (BuiltinID) { 2240 default: 2241 return false; 2242 #define GET_NEON_IMMEDIATE_CHECK 2243 #include "clang/Basic/arm_neon.inc" 2244 #include "clang/Basic/arm_fp16.inc" 2245 #undef GET_NEON_IMMEDIATE_CHECK 2246 } 2247 2248 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2249 } 2250 2251 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2252 switch (BuiltinID) { 2253 default: 2254 return false; 2255 #include "clang/Basic/arm_mve_builtin_sema.inc" 2256 } 2257 } 2258 2259 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2260 CallExpr *TheCall) { 2261 bool Err = false; 2262 switch (BuiltinID) { 2263 default: 2264 return false; 2265 #include "clang/Basic/arm_cde_builtin_sema.inc" 2266 } 2267 2268 if (Err) 2269 return true; 2270 2271 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2272 } 2273 2274 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2275 const Expr *CoprocArg, bool WantCDE) { 2276 if (isConstantEvaluated()) 2277 return false; 2278 2279 // We can't check the value of a dependent argument. 2280 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2281 return false; 2282 2283 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2284 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2285 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2286 2287 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2288 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2289 2290 if (IsCDECoproc != WantCDE) 2291 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2292 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2293 2294 return false; 2295 } 2296 2297 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2298 unsigned MaxWidth) { 2299 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2300 BuiltinID == ARM::BI__builtin_arm_ldaex || 2301 BuiltinID == ARM::BI__builtin_arm_strex || 2302 BuiltinID == ARM::BI__builtin_arm_stlex || 2303 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2304 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2305 BuiltinID == AArch64::BI__builtin_arm_strex || 2306 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2307 "unexpected ARM builtin"); 2308 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2309 BuiltinID == ARM::BI__builtin_arm_ldaex || 2310 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2311 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2312 2313 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2314 2315 // Ensure that we have the proper number of arguments. 2316 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2317 return true; 2318 2319 // Inspect the pointer argument of the atomic builtin. This should always be 2320 // a pointer type, whose element is an integral scalar or pointer type. 2321 // Because it is a pointer type, we don't have to worry about any implicit 2322 // casts here. 2323 Expr *PointerArg = TheCall->getArg(IsLdrex ? 
0 : 1); 2324 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2325 if (PointerArgRes.isInvalid()) 2326 return true; 2327 PointerArg = PointerArgRes.get(); 2328 2329 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2330 if (!pointerType) { 2331 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2332 << PointerArg->getType() << PointerArg->getSourceRange(); 2333 return true; 2334 } 2335 2336 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2337 // task is to insert the appropriate casts into the AST. First work out just 2338 // what the appropriate type is. 2339 QualType ValType = pointerType->getPointeeType(); 2340 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2341 if (IsLdrex) 2342 AddrType.addConst(); 2343 2344 // Issue a warning if the cast is dodgy. 2345 CastKind CastNeeded = CK_NoOp; 2346 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2347 CastNeeded = CK_BitCast; 2348 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2349 << PointerArg->getType() << Context.getPointerType(AddrType) 2350 << AA_Passing << PointerArg->getSourceRange(); 2351 } 2352 2353 // Finally, do the cast and replace the argument with the corrected version. 2354 AddrType = Context.getPointerType(AddrType); 2355 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2356 if (PointerArgRes.isInvalid()) 2357 return true; 2358 PointerArg = PointerArgRes.get(); 2359 2360 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); 2361 2362 // In general, we allow ints, floats and pointers to be loaded and stored. 2363 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2364 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2365 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2366 << PointerArg->getType() << PointerArg->getSourceRange(); 2367 return true; 2368 } 2369 2370 // But ARM doesn't have instructions to deal with 128-bit versions. 2371 if (Context.getTypeSize(ValType) > MaxWidth) { 2372 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2373 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2374 << PointerArg->getType() << PointerArg->getSourceRange(); 2375 return true; 2376 } 2377 2378 switch (ValType.getObjCLifetime()) { 2379 case Qualifiers::OCL_None: 2380 case Qualifiers::OCL_ExplicitNone: 2381 // okay 2382 break; 2383 2384 case Qualifiers::OCL_Weak: 2385 case Qualifiers::OCL_Strong: 2386 case Qualifiers::OCL_Autoreleasing: 2387 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2388 << ValType << PointerArg->getSourceRange(); 2389 return true; 2390 } 2391 2392 if (IsLdrex) { 2393 TheCall->setType(ValType); 2394 return false; 2395 } 2396 2397 // Initialize the argument to be stored. 2398 ExprResult ValArg = TheCall->getArg(0); 2399 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2400 Context, ValType, /*consume*/ false); 2401 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2402 if (ValArg.isInvalid()) 2403 return true; 2404 TheCall->setArg(0, ValArg.get()); 2405 2406 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2407 // but the custom checker bypasses all default analysis. 
2408 TheCall->setType(Context.IntTy); 2409 return false; 2410 } 2411 2412 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2413 CallExpr *TheCall) { 2414 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2415 BuiltinID == ARM::BI__builtin_arm_ldaex || 2416 BuiltinID == ARM::BI__builtin_arm_strex || 2417 BuiltinID == ARM::BI__builtin_arm_stlex) { 2418 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2419 } 2420 2421 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2422 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2423 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2424 } 2425 2426 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2427 BuiltinID == ARM::BI__builtin_arm_wsr64) 2428 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2429 2430 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2431 BuiltinID == ARM::BI__builtin_arm_rsrp || 2432 BuiltinID == ARM::BI__builtin_arm_wsr || 2433 BuiltinID == ARM::BI__builtin_arm_wsrp) 2434 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2435 2436 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2437 return true; 2438 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2439 return true; 2440 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2441 return true; 2442 2443 // For intrinsics which take an immediate value as part of the instruction, 2444 // range check them here. 2445 // FIXME: VFP Intrinsics should error if VFP not present. 2446 switch (BuiltinID) { 2447 default: return false; 2448 case ARM::BI__builtin_arm_ssat: 2449 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2450 case ARM::BI__builtin_arm_usat: 2451 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2452 case ARM::BI__builtin_arm_ssat16: 2453 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2454 case ARM::BI__builtin_arm_usat16: 2455 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2456 case ARM::BI__builtin_arm_vcvtr_f: 2457 case ARM::BI__builtin_arm_vcvtr_d: 2458 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2459 case ARM::BI__builtin_arm_dmb: 2460 case ARM::BI__builtin_arm_dsb: 2461 case ARM::BI__builtin_arm_isb: 2462 case ARM::BI__builtin_arm_dbg: 2463 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2464 case ARM::BI__builtin_arm_cdp: 2465 case ARM::BI__builtin_arm_cdp2: 2466 case ARM::BI__builtin_arm_mcr: 2467 case ARM::BI__builtin_arm_mcr2: 2468 case ARM::BI__builtin_arm_mrc: 2469 case ARM::BI__builtin_arm_mrc2: 2470 case ARM::BI__builtin_arm_mcrr: 2471 case ARM::BI__builtin_arm_mcrr2: 2472 case ARM::BI__builtin_arm_mrrc: 2473 case ARM::BI__builtin_arm_mrrc2: 2474 case ARM::BI__builtin_arm_ldc: 2475 case ARM::BI__builtin_arm_ldcl: 2476 case ARM::BI__builtin_arm_ldc2: 2477 case ARM::BI__builtin_arm_ldc2l: 2478 case ARM::BI__builtin_arm_stc: 2479 case ARM::BI__builtin_arm_stcl: 2480 case ARM::BI__builtin_arm_stc2: 2481 case ARM::BI__builtin_arm_stc2l: 2482 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2483 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2484 /*WantCDE*/ false); 2485 } 2486 } 2487 2488 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2489 unsigned BuiltinID, 2490 CallExpr *TheCall) { 2491 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2492 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2493 BuiltinID == AArch64::BI__builtin_arm_strex || 2494 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2495 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2496 } 2497 2498 if (BuiltinID == 
AArch64::BI__builtin_arm_prefetch) { 2499 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2500 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2501 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2502 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2503 } 2504 2505 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2506 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2507 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2508 2509 // Memory Tagging Extensions (MTE) Intrinsics 2510 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2511 BuiltinID == AArch64::BI__builtin_arm_addg || 2512 BuiltinID == AArch64::BI__builtin_arm_gmi || 2513 BuiltinID == AArch64::BI__builtin_arm_ldg || 2514 BuiltinID == AArch64::BI__builtin_arm_stg || 2515 BuiltinID == AArch64::BI__builtin_arm_subp) { 2516 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2517 } 2518 2519 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2520 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2521 BuiltinID == AArch64::BI__builtin_arm_wsr || 2522 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2523 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2524 2525 // Only check the valid encoding range. Any constant in this range would be 2526 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2527 // an exception for incorrect registers. This matches MSVC behavior. 2528 if (BuiltinID == AArch64::BI_ReadStatusReg || 2529 BuiltinID == AArch64::BI_WriteStatusReg) 2530 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2531 2532 if (BuiltinID == AArch64::BI__getReg) 2533 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2534 2535 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2536 return true; 2537 2538 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2539 return true; 2540 2541 // For intrinsics which take an immediate value as part of the instruction, 2542 // range check them here. 2543 unsigned i = 0, l = 0, u = 0; 2544 switch (BuiltinID) { 2545 default: return false; 2546 case AArch64::BI__builtin_arm_dmb: 2547 case AArch64::BI__builtin_arm_dsb: 2548 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2549 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2550 } 2551 2552 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2553 } 2554 2555 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2556 if (Arg->getType()->getAsPlaceholderType()) 2557 return false; 2558 2559 // The first argument needs to be a record field access. 2560 // If it is an array element access, we delay decision 2561 // to BPF backend to check whether the access is a 2562 // field access or not. 2563 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2564 dyn_cast<MemberExpr>(Arg->IgnoreParens()) || 2565 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens())); 2566 } 2567 2568 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2569 QualType VectorTy, QualType EltTy) { 2570 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2571 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2572 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2573 << Call->getSourceRange() << VectorEltTy << EltTy; 2574 return false; 2575 } 2576 return true; 2577 } 2578 2579 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2580 QualType ArgType = Arg->getType(); 2581 if (ArgType->getAsPlaceholderType()) 2582 return false; 2583 2584 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2585 // format: 2586 // 1. 
__builtin_preserve_type_info(*(<type> *)0, flag); 2587 // 2. <type> var; 2588 // __builtin_preserve_type_info(var, flag); 2589 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) && 2590 !dyn_cast<UnaryOperator>(Arg->IgnoreParens())) 2591 return false; 2592 2593 // Typedef type. 2594 if (ArgType->getAs<TypedefType>()) 2595 return true; 2596 2597 // Record type or Enum type. 2598 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2599 if (const auto *RT = Ty->getAs<RecordType>()) { 2600 if (!RT->getDecl()->getDeclName().isEmpty()) 2601 return true; 2602 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2603 if (!ET->getDecl()->getDeclName().isEmpty()) 2604 return true; 2605 } 2606 2607 return false; 2608 } 2609 2610 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2611 QualType ArgType = Arg->getType(); 2612 if (ArgType->getAsPlaceholderType()) 2613 return false; 2614 2615 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2616 // format: 2617 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2618 // flag); 2619 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2620 if (!UO) 2621 return false; 2622 2623 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2624 if (!CE || CE->getCastKind() != CK_IntegralToPointer) 2625 return false; 2626 2627 // The integer must be from an EnumConstantDecl. 2628 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2629 if (!DR) 2630 return false; 2631 2632 const EnumConstantDecl *Enumerator = 2633 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2634 if (!Enumerator) 2635 return false; 2636 2637 // The type must be EnumType. 2638 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2639 const auto *ET = Ty->getAs<EnumType>(); 2640 if (!ET) 2641 return false; 2642 2643 // The enum value must be supported. 
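  // Illustrative usage matching the format described above (the enum and
  // enumerator names are examples only):
  //   enum Flag { FIRST = 1 };
  //   __builtin_preserve_enum_value(*(enum Flag *)FIRST, flag);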
2644 for (auto *EDI : ET->getDecl()->enumerators()) { 2645 if (EDI == Enumerator) 2646 return true; 2647 } 2648 2649 return false; 2650 } 2651 2652 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2653 CallExpr *TheCall) { 2654 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2655 BuiltinID == BPF::BI__builtin_btf_type_id || 2656 BuiltinID == BPF::BI__builtin_preserve_type_info || 2657 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2658 "unexpected BPF builtin"); 2659 2660 if (checkArgCount(*this, TheCall, 2)) 2661 return true; 2662 2663 // The second argument needs to be a constant int 2664 Expr *Arg = TheCall->getArg(1); 2665 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2666 diag::kind kind; 2667 if (!Value) { 2668 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2669 kind = diag::err_preserve_field_info_not_const; 2670 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2671 kind = diag::err_btf_type_id_not_const; 2672 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2673 kind = diag::err_preserve_type_info_not_const; 2674 else 2675 kind = diag::err_preserve_enum_value_not_const; 2676 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2677 return true; 2678 } 2679 2680 // The first argument 2681 Arg = TheCall->getArg(0); 2682 bool InvalidArg = false; 2683 bool ReturnUnsignedInt = true; 2684 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2685 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2686 InvalidArg = true; 2687 kind = diag::err_preserve_field_info_not_field; 2688 } 2689 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2690 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2691 InvalidArg = true; 2692 kind = diag::err_preserve_type_info_invalid; 2693 } 2694 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2695 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2696 InvalidArg = true; 2697 kind = diag::err_preserve_enum_value_invalid; 2698 } 2699 ReturnUnsignedInt = false; 2700 } 2701 2702 if (InvalidArg) { 2703 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 2704 return true; 2705 } 2706 2707 if (ReturnUnsignedInt) 2708 TheCall->setType(Context.UnsignedIntTy); 2709 else 2710 TheCall->setType(Context.UnsignedLongTy); 2711 return false; 2712 } 2713 2714 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2715 struct ArgInfo { 2716 uint8_t OpNum; 2717 bool IsSigned; 2718 uint8_t BitWidth; 2719 uint8_t Align; 2720 }; 2721 struct BuiltinInfo { 2722 unsigned BuiltinID; 2723 ArgInfo Infos[2]; 2724 }; 2725 2726 static BuiltinInfo Infos[] = { 2727 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2728 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2729 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2730 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2731 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2732 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2733 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2734 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2735 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2736 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2737 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2738 2739 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2740 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2741 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2742 { 
Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2743 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2744 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2745 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2746 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2747 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2748 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2749 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2750 2751 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2752 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2753 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2754 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2755 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2756 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2757 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2758 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2759 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2760 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2761 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2762 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2763 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2764 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2765 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2766 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2767 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2768 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2769 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 2770 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2771 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2772 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2773 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2774 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2775 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2776 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2777 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2778 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2779 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2780 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2781 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2782 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2783 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2784 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2785 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2786 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2787 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2788 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2791 { 
Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2799 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2800 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2803 {{ 1, false, 6, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2811 {{ 1, false, 5, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2818 { 2, false, 5, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2820 { 2, false, 6, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2822 { 3, false, 5, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2824 { 3, false, 6, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2841 {{ 2, false, 4, 0 }, 2842 { 3, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2844 {{ 2, false, 4, 0 }, 2845 { 3, 
false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2847 {{ 2, false, 4, 0 }, 2848 { 3, false, 5, 0 }} }, 2849 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2850 {{ 2, false, 4, 0 }, 2851 { 3, false, 5, 0 }} }, 2852 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2860 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2863 { 2, false, 5, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2865 { 2, false, 6, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2867 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2868 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2869 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2871 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2872 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2873 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2875 {{ 1, false, 4, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2878 {{ 1, false, 4, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2890 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2891 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2893 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2894 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2896 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2897 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2899 {{ 3, 
false, 1, 0 }} }, 2900 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2904 {{ 3, false, 1, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2906 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2909 {{ 3, false, 1, 0 }} }, 2910 }; 2911 2912 // Use a dynamically initialized static to sort the table exactly once on 2913 // first run. 2914 static const bool SortOnce = 2915 (llvm::sort(Infos, 2916 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2917 return LHS.BuiltinID < RHS.BuiltinID; 2918 }), 2919 true); 2920 (void)SortOnce; 2921 2922 const BuiltinInfo *F = llvm::partition_point( 2923 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2924 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2925 return false; 2926 2927 bool Error = false; 2928 2929 for (const ArgInfo &A : F->Infos) { 2930 // Ignore empty ArgInfo elements. 2931 if (A.BitWidth == 0) 2932 continue; 2933 2934 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2935 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1; 2936 if (!A.Align) { 2937 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2938 } else { 2939 unsigned M = 1 << A.Align; 2940 Min *= M; 2941 Max *= M; 2942 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2943 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2944 } 2945 } 2946 return Error; 2947 } 2948 2949 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2950 CallExpr *TheCall) { 2951 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 2952 } 2953 2954 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 2955 unsigned BuiltinID, CallExpr *TheCall) { 2956 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 2957 CheckMipsBuiltinArgument(BuiltinID, TheCall); 2958 } 2959 2960 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 2961 CallExpr *TheCall) { 2962 2963 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 2964 BuiltinID <= Mips::BI__builtin_mips_lwx) { 2965 if (!TI.hasFeature("dsp")) 2966 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 2967 } 2968 2969 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 2970 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 2971 if (!TI.hasFeature("dspr2")) 2972 return Diag(TheCall->getBeginLoc(), 2973 diag::err_mips_builtin_requires_dspr2); 2974 } 2975 2976 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 2977 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 2978 if (!TI.hasFeature("msa")) 2979 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 2980 } 2981 2982 return false; 2983 } 2984 2985 // CheckMipsBuiltinArgument - Checks the constant value passed to the 2986 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 2987 // ordering for DSP is unspecified. MSA is ordered by the data format used 2988 // by the underlying instruction i.e., df/m, df/n and then by size. 2989 // 2990 // FIXME: The size tests here should instead be tablegen'd along with the 2991 // definitions from include/clang/Basic/BuiltinsMips.def. 
2992 // FIXME: GCC is strict on signedness for some of these intrinsics; we should 2993 // be too. 2994 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2995 unsigned i = 0, l = 0, u = 0, m = 0; 2996 switch (BuiltinID) { 2997 default: return false; 2998 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 2999 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3000 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3001 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3002 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3003 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3004 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3005 // MSA intrinsics. Instructions (which the intrinsics map to) that use the 3006 // df/m field. 3007 // These intrinsics take an unsigned 3 bit immediate. 3008 case Mips::BI__builtin_msa_bclri_b: 3009 case Mips::BI__builtin_msa_bnegi_b: 3010 case Mips::BI__builtin_msa_bseti_b: 3011 case Mips::BI__builtin_msa_sat_s_b: 3012 case Mips::BI__builtin_msa_sat_u_b: 3013 case Mips::BI__builtin_msa_slli_b: 3014 case Mips::BI__builtin_msa_srai_b: 3015 case Mips::BI__builtin_msa_srari_b: 3016 case Mips::BI__builtin_msa_srli_b: 3017 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3018 case Mips::BI__builtin_msa_binsli_b: 3019 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3020 // These intrinsics take an unsigned 4 bit immediate. 3021 case Mips::BI__builtin_msa_bclri_h: 3022 case Mips::BI__builtin_msa_bnegi_h: 3023 case Mips::BI__builtin_msa_bseti_h: 3024 case Mips::BI__builtin_msa_sat_s_h: 3025 case Mips::BI__builtin_msa_sat_u_h: 3026 case Mips::BI__builtin_msa_slli_h: 3027 case Mips::BI__builtin_msa_srai_h: 3028 case Mips::BI__builtin_msa_srari_h: 3029 case Mips::BI__builtin_msa_srli_h: 3030 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3031 case Mips::BI__builtin_msa_binsli_h: 3032 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3033 // These intrinsics take an unsigned 5 bit immediate. 3034 // The first block of intrinsics actually has an unsigned 5 bit field, 3035 // not a df/n field.
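// For example, the first operand of __builtin_msa_cfcmsa and __builtin_msa_ctcmsa
// below is checked against the plain 5 bit range [0, 31].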
3036 case Mips::BI__builtin_msa_cfcmsa: 3037 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3038 case Mips::BI__builtin_msa_clei_u_b: 3039 case Mips::BI__builtin_msa_clei_u_h: 3040 case Mips::BI__builtin_msa_clei_u_w: 3041 case Mips::BI__builtin_msa_clei_u_d: 3042 case Mips::BI__builtin_msa_clti_u_b: 3043 case Mips::BI__builtin_msa_clti_u_h: 3044 case Mips::BI__builtin_msa_clti_u_w: 3045 case Mips::BI__builtin_msa_clti_u_d: 3046 case Mips::BI__builtin_msa_maxi_u_b: 3047 case Mips::BI__builtin_msa_maxi_u_h: 3048 case Mips::BI__builtin_msa_maxi_u_w: 3049 case Mips::BI__builtin_msa_maxi_u_d: 3050 case Mips::BI__builtin_msa_mini_u_b: 3051 case Mips::BI__builtin_msa_mini_u_h: 3052 case Mips::BI__builtin_msa_mini_u_w: 3053 case Mips::BI__builtin_msa_mini_u_d: 3054 case Mips::BI__builtin_msa_addvi_b: 3055 case Mips::BI__builtin_msa_addvi_h: 3056 case Mips::BI__builtin_msa_addvi_w: 3057 case Mips::BI__builtin_msa_addvi_d: 3058 case Mips::BI__builtin_msa_bclri_w: 3059 case Mips::BI__builtin_msa_bnegi_w: 3060 case Mips::BI__builtin_msa_bseti_w: 3061 case Mips::BI__builtin_msa_sat_s_w: 3062 case Mips::BI__builtin_msa_sat_u_w: 3063 case Mips::BI__builtin_msa_slli_w: 3064 case Mips::BI__builtin_msa_srai_w: 3065 case Mips::BI__builtin_msa_srari_w: 3066 case Mips::BI__builtin_msa_srli_w: 3067 case Mips::BI__builtin_msa_srlri_w: 3068 case Mips::BI__builtin_msa_subvi_b: 3069 case Mips::BI__builtin_msa_subvi_h: 3070 case Mips::BI__builtin_msa_subvi_w: 3071 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3072 case Mips::BI__builtin_msa_binsli_w: 3073 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3074 // These intrinsics take an unsigned 6 bit immediate. 3075 case Mips::BI__builtin_msa_bclri_d: 3076 case Mips::BI__builtin_msa_bnegi_d: 3077 case Mips::BI__builtin_msa_bseti_d: 3078 case Mips::BI__builtin_msa_sat_s_d: 3079 case Mips::BI__builtin_msa_sat_u_d: 3080 case Mips::BI__builtin_msa_slli_d: 3081 case Mips::BI__builtin_msa_srai_d: 3082 case Mips::BI__builtin_msa_srari_d: 3083 case Mips::BI__builtin_msa_srli_d: 3084 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3085 case Mips::BI__builtin_msa_binsli_d: 3086 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3087 // These intrinsics take a signed 5 bit immediate. 3088 case Mips::BI__builtin_msa_ceqi_b: 3089 case Mips::BI__builtin_msa_ceqi_h: 3090 case Mips::BI__builtin_msa_ceqi_w: 3091 case Mips::BI__builtin_msa_ceqi_d: 3092 case Mips::BI__builtin_msa_clti_s_b: 3093 case Mips::BI__builtin_msa_clti_s_h: 3094 case Mips::BI__builtin_msa_clti_s_w: 3095 case Mips::BI__builtin_msa_clti_s_d: 3096 case Mips::BI__builtin_msa_clei_s_b: 3097 case Mips::BI__builtin_msa_clei_s_h: 3098 case Mips::BI__builtin_msa_clei_s_w: 3099 case Mips::BI__builtin_msa_clei_s_d: 3100 case Mips::BI__builtin_msa_maxi_s_b: 3101 case Mips::BI__builtin_msa_maxi_s_h: 3102 case Mips::BI__builtin_msa_maxi_s_w: 3103 case Mips::BI__builtin_msa_maxi_s_d: 3104 case Mips::BI__builtin_msa_mini_s_b: 3105 case Mips::BI__builtin_msa_mini_s_h: 3106 case Mips::BI__builtin_msa_mini_s_w: 3107 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3108 // These intrinsics take an unsigned 8 bit immediate. 
3109 case Mips::BI__builtin_msa_andi_b: 3110 case Mips::BI__builtin_msa_nori_b: 3111 case Mips::BI__builtin_msa_ori_b: 3112 case Mips::BI__builtin_msa_shf_b: 3113 case Mips::BI__builtin_msa_shf_h: 3114 case Mips::BI__builtin_msa_shf_w: 3115 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3116 case Mips::BI__builtin_msa_bseli_b: 3117 case Mips::BI__builtin_msa_bmnzi_b: 3118 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3119 // df/n format 3120 // These intrinsics take an unsigned 4 bit immediate. 3121 case Mips::BI__builtin_msa_copy_s_b: 3122 case Mips::BI__builtin_msa_copy_u_b: 3123 case Mips::BI__builtin_msa_insve_b: 3124 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3125 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3126 // These intrinsics take an unsigned 3 bit immediate. 3127 case Mips::BI__builtin_msa_copy_s_h: 3128 case Mips::BI__builtin_msa_copy_u_h: 3129 case Mips::BI__builtin_msa_insve_h: 3130 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3131 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3132 // These intrinsics take an unsigned 2 bit immediate. 3133 case Mips::BI__builtin_msa_copy_s_w: 3134 case Mips::BI__builtin_msa_copy_u_w: 3135 case Mips::BI__builtin_msa_insve_w: 3136 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3137 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3138 // These intrinsics take an unsigned 1 bit immediate. 3139 case Mips::BI__builtin_msa_copy_s_d: 3140 case Mips::BI__builtin_msa_copy_u_d: 3141 case Mips::BI__builtin_msa_insve_d: 3142 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3143 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3144 // Memory offsets and immediate loads. 3145 // These intrinsics take a signed 10 bit immediate. 
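// Note that ldi_b uses the wider range [-128, 255], which covers both signed and
// unsigned 8 bit values, and that the ld/st forms below additionally require the
// offset to be a multiple of the element size (m).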
3146 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3147 case Mips::BI__builtin_msa_ldi_h: 3148 case Mips::BI__builtin_msa_ldi_w: 3149 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3150 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3151 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3152 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3153 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3154 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3155 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3156 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3157 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3158 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3159 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3160 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3161 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3162 } 3163 3164 if (!m) 3165 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3166 3167 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3168 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3169 } 3170 3171 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3172 CallExpr *TheCall) { 3173 unsigned i = 0, l = 0, u = 0; 3174 bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || 3175 BuiltinID == PPC::BI__builtin_divdeu || 3176 BuiltinID == PPC::BI__builtin_bpermd; 3177 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3178 bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || 3179 BuiltinID == PPC::BI__builtin_divweu || 3180 BuiltinID == PPC::BI__builtin_divde || 3181 BuiltinID == PPC::BI__builtin_divdeu; 3182 3183 if (Is64BitBltin && !IsTarget64Bit) 3184 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3185 << TheCall->getSourceRange(); 3186 3187 if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) || 3188 (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd"))) 3189 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3190 << TheCall->getSourceRange(); 3191 3192 auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool { 3193 if (!TI.hasFeature("vsx")) 3194 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7) 3195 << TheCall->getSourceRange(); 3196 return false; 3197 }; 3198 3199 switch (BuiltinID) { 3200 default: return false; 3201 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3202 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3203 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3204 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3205 case PPC::BI__builtin_altivec_dss: 3206 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3207 case PPC::BI__builtin_tbegin: 3208 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3209 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3210 case PPC::BI__builtin_tabortwc: 3211 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3212 case PPC::BI__builtin_tabortwci: 3213 case PPC::BI__builtin_tabortdci: 3214 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3215 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3216 case PPC::BI__builtin_altivec_dst: 3217 case PPC::BI__builtin_altivec_dstt: 3218 case PPC::BI__builtin_altivec_dstst: 
3219 case PPC::BI__builtin_altivec_dststt: 3220 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3221 case PPC::BI__builtin_vsx_xxpermdi: 3222 case PPC::BI__builtin_vsx_xxsldwi: 3223 return SemaBuiltinVSX(TheCall); 3224 case PPC::BI__builtin_unpack_vector_int128: 3225 return SemaVSXCheck(TheCall) || 3226 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3227 case PPC::BI__builtin_pack_vector_int128: 3228 return SemaVSXCheck(TheCall); 3229 case PPC::BI__builtin_altivec_vgnb: 3230 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3231 case PPC::BI__builtin_altivec_vec_replace_elt: 3232 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3233 QualType VecTy = TheCall->getArg(0)->getType(); 3234 QualType EltTy = TheCall->getArg(1)->getType(); 3235 unsigned Width = Context.getIntWidth(EltTy); 3236 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 3237 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3238 } 3239 case PPC::BI__builtin_vsx_xxeval: 3240 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3241 case PPC::BI__builtin_altivec_vsldbi: 3242 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3243 case PPC::BI__builtin_altivec_vsrdbi: 3244 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3245 case PPC::BI__builtin_vsx_xxpermx: 3246 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3247 } 3248 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3249 } 3250 3251 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 3252 CallExpr *TheCall) { 3253 // Position of the memory order and scope arguments in the builtin. 3254 unsigned OrderIndex, ScopeIndex; 3255 switch (BuiltinID) { 3256 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 3257 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 3258 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 3259 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 3260 OrderIndex = 2; 3261 ScopeIndex = 3; 3262 break; 3263 case AMDGPU::BI__builtin_amdgcn_fence: 3264 OrderIndex = 0; 3265 ScopeIndex = 1; 3266 break; 3267 default: 3268 return false; 3269 } 3270 3271 ExprResult Arg = TheCall->getArg(OrderIndex); 3272 auto ArgExpr = Arg.get(); 3273 Expr::EvalResult ArgResult; 3274 3275 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 3276 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 3277 << ArgExpr->getType(); 3278 int ord = ArgResult.Val.getInt().getZExtValue(); 3279 3280 // Check validity of memory ordering as per C11 / C++11's memory model.
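// Only acquire, release, acq_rel and seq_cst are accepted here; relaxed and
// consume orderings are diagnosed as invalid for these builtins.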
3281 switch (static_cast<llvm::AtomicOrderingCABI>(ord)) { 3282 case llvm::AtomicOrderingCABI::acquire: 3283 case llvm::AtomicOrderingCABI::release: 3284 case llvm::AtomicOrderingCABI::acq_rel: 3285 case llvm::AtomicOrderingCABI::seq_cst: 3286 break; 3287 default: { 3288 return Diag(ArgExpr->getBeginLoc(), 3289 diag::warn_atomic_op_has_invalid_memory_order) 3290 << ArgExpr->getSourceRange(); 3291 } 3292 } 3293 3294 Arg = TheCall->getArg(ScopeIndex); 3295 ArgExpr = Arg.get(); 3296 Expr::EvalResult ArgResult1; 3297 // Check that sync scope is a constant literal 3298 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Expr::EvaluateForCodeGen, 3299 Context)) 3300 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3301 << ArgExpr->getType(); 3302 3303 return false; 3304 } 3305 3306 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3307 CallExpr *TheCall) { 3308 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3309 Expr *Arg = TheCall->getArg(0); 3310 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 3311 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 3312 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3313 << Arg->getSourceRange(); 3314 } 3315 3316 // For intrinsics which take an immediate value as part of the instruction, 3317 // range check them here. 3318 unsigned i = 0, l = 0, u = 0; 3319 switch (BuiltinID) { 3320 default: return false; 3321 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3322 case SystemZ::BI__builtin_s390_verimb: 3323 case SystemZ::BI__builtin_s390_verimh: 3324 case SystemZ::BI__builtin_s390_verimf: 3325 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3326 case SystemZ::BI__builtin_s390_vfaeb: 3327 case SystemZ::BI__builtin_s390_vfaeh: 3328 case SystemZ::BI__builtin_s390_vfaef: 3329 case SystemZ::BI__builtin_s390_vfaebs: 3330 case SystemZ::BI__builtin_s390_vfaehs: 3331 case SystemZ::BI__builtin_s390_vfaefs: 3332 case SystemZ::BI__builtin_s390_vfaezb: 3333 case SystemZ::BI__builtin_s390_vfaezh: 3334 case SystemZ::BI__builtin_s390_vfaezf: 3335 case SystemZ::BI__builtin_s390_vfaezbs: 3336 case SystemZ::BI__builtin_s390_vfaezhs: 3337 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3338 case SystemZ::BI__builtin_s390_vfisb: 3339 case SystemZ::BI__builtin_s390_vfidb: 3340 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3341 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3342 case SystemZ::BI__builtin_s390_vftcisb: 3343 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3344 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3345 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3346 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3347 case SystemZ::BI__builtin_s390_vstrcb: 3348 case SystemZ::BI__builtin_s390_vstrch: 3349 case SystemZ::BI__builtin_s390_vstrcf: 3350 case SystemZ::BI__builtin_s390_vstrczb: 3351 case SystemZ::BI__builtin_s390_vstrczh: 3352 case SystemZ::BI__builtin_s390_vstrczf: 3353 case SystemZ::BI__builtin_s390_vstrcbs: 3354 case SystemZ::BI__builtin_s390_vstrchs: 3355 case SystemZ::BI__builtin_s390_vstrcfs: 3356 case SystemZ::BI__builtin_s390_vstrczbs: 3357 case SystemZ::BI__builtin_s390_vstrczhs: 3358 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3359 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3360 case SystemZ::BI__builtin_s390_vfminsb: 3361 case 
SystemZ::BI__builtin_s390_vfmaxsb: 3362 case SystemZ::BI__builtin_s390_vfmindb: 3363 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3364 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3365 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3366 } 3367 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3368 } 3369 3370 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3371 /// This checks that the target supports __builtin_cpu_supports and 3372 /// that the string argument is constant and valid. 3373 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3374 CallExpr *TheCall) { 3375 Expr *Arg = TheCall->getArg(0); 3376 3377 // Check if the argument is a string literal. 3378 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3379 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3380 << Arg->getSourceRange(); 3381 3382 // Check the contents of the string. 3383 StringRef Feature = 3384 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3385 if (!TI.validateCpuSupports(Feature)) 3386 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3387 << Arg->getSourceRange(); 3388 return false; 3389 } 3390 3391 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3392 /// This checks that the target supports __builtin_cpu_is and 3393 /// that the string argument is constant and valid. 3394 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3395 Expr *Arg = TheCall->getArg(0); 3396 3397 // Check if the argument is a string literal. 3398 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3399 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3400 << Arg->getSourceRange(); 3401 3402 // Check the contents of the string. 3403 StringRef Feature = 3404 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3405 if (!TI.validateCpuIs(Feature)) 3406 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3407 << Arg->getSourceRange(); 3408 return false; 3409 } 3410 3411 // Check if the rounding mode is legal. 3412 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3413 // Indicates if this instruction has rounding control or just SAE. 
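// Instructions that only have SAE accept ROUND_CUR_DIRECTION (4), ROUND_NO_EXC
// (8), or their combination (12); instructions with rounding control accept 4, 8,
// or a rounding mode (0-3) combined with ROUND_NO_EXC (values 8-11). See the
// check at the end of this function.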
3414 bool HasRC = false; 3415 3416 unsigned ArgNum = 0; 3417 switch (BuiltinID) { 3418 default: 3419 return false; 3420 case X86::BI__builtin_ia32_vcvttsd2si32: 3421 case X86::BI__builtin_ia32_vcvttsd2si64: 3422 case X86::BI__builtin_ia32_vcvttsd2usi32: 3423 case X86::BI__builtin_ia32_vcvttsd2usi64: 3424 case X86::BI__builtin_ia32_vcvttss2si32: 3425 case X86::BI__builtin_ia32_vcvttss2si64: 3426 case X86::BI__builtin_ia32_vcvttss2usi32: 3427 case X86::BI__builtin_ia32_vcvttss2usi64: 3428 ArgNum = 1; 3429 break; 3430 case X86::BI__builtin_ia32_maxpd512: 3431 case X86::BI__builtin_ia32_maxps512: 3432 case X86::BI__builtin_ia32_minpd512: 3433 case X86::BI__builtin_ia32_minps512: 3434 ArgNum = 2; 3435 break; 3436 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3437 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3438 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3439 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3440 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3441 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3442 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3443 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3444 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3445 case X86::BI__builtin_ia32_exp2pd_mask: 3446 case X86::BI__builtin_ia32_exp2ps_mask: 3447 case X86::BI__builtin_ia32_getexppd512_mask: 3448 case X86::BI__builtin_ia32_getexpps512_mask: 3449 case X86::BI__builtin_ia32_rcp28pd_mask: 3450 case X86::BI__builtin_ia32_rcp28ps_mask: 3451 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3452 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3453 case X86::BI__builtin_ia32_vcomisd: 3454 case X86::BI__builtin_ia32_vcomiss: 3455 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3456 ArgNum = 3; 3457 break; 3458 case X86::BI__builtin_ia32_cmppd512_mask: 3459 case X86::BI__builtin_ia32_cmpps512_mask: 3460 case X86::BI__builtin_ia32_cmpsd_mask: 3461 case X86::BI__builtin_ia32_cmpss_mask: 3462 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3463 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3464 case X86::BI__builtin_ia32_getexpss128_round_mask: 3465 case X86::BI__builtin_ia32_getmantpd512_mask: 3466 case X86::BI__builtin_ia32_getmantps512_mask: 3467 case X86::BI__builtin_ia32_maxsd_round_mask: 3468 case X86::BI__builtin_ia32_maxss_round_mask: 3469 case X86::BI__builtin_ia32_minsd_round_mask: 3470 case X86::BI__builtin_ia32_minss_round_mask: 3471 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3472 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3473 case X86::BI__builtin_ia32_reducepd512_mask: 3474 case X86::BI__builtin_ia32_reduceps512_mask: 3475 case X86::BI__builtin_ia32_rndscalepd_mask: 3476 case X86::BI__builtin_ia32_rndscaleps_mask: 3477 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3478 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3479 ArgNum = 4; 3480 break; 3481 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3482 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3483 case X86::BI__builtin_ia32_fixupimmps512_mask: 3484 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3485 case X86::BI__builtin_ia32_fixupimmsd_mask: 3486 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3487 case X86::BI__builtin_ia32_fixupimmss_mask: 3488 case X86::BI__builtin_ia32_fixupimmss_maskz: 3489 case X86::BI__builtin_ia32_getmantsd_round_mask: 3490 case X86::BI__builtin_ia32_getmantss_round_mask: 3491 case X86::BI__builtin_ia32_rangepd512_mask: 3492 case X86::BI__builtin_ia32_rangeps512_mask: 3493 case X86::BI__builtin_ia32_rangesd128_round_mask: 3494 case X86::BI__builtin_ia32_rangess128_round_mask: 3495 case 
X86::BI__builtin_ia32_reducesd_mask: 3496 case X86::BI__builtin_ia32_reducess_mask: 3497 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3498 case X86::BI__builtin_ia32_rndscaless_round_mask: 3499 ArgNum = 5; 3500 break; 3501 case X86::BI__builtin_ia32_vcvtsd2si64: 3502 case X86::BI__builtin_ia32_vcvtsd2si32: 3503 case X86::BI__builtin_ia32_vcvtsd2usi32: 3504 case X86::BI__builtin_ia32_vcvtsd2usi64: 3505 case X86::BI__builtin_ia32_vcvtss2si32: 3506 case X86::BI__builtin_ia32_vcvtss2si64: 3507 case X86::BI__builtin_ia32_vcvtss2usi32: 3508 case X86::BI__builtin_ia32_vcvtss2usi64: 3509 case X86::BI__builtin_ia32_sqrtpd512: 3510 case X86::BI__builtin_ia32_sqrtps512: 3511 ArgNum = 1; 3512 HasRC = true; 3513 break; 3514 case X86::BI__builtin_ia32_addpd512: 3515 case X86::BI__builtin_ia32_addps512: 3516 case X86::BI__builtin_ia32_divpd512: 3517 case X86::BI__builtin_ia32_divps512: 3518 case X86::BI__builtin_ia32_mulpd512: 3519 case X86::BI__builtin_ia32_mulps512: 3520 case X86::BI__builtin_ia32_subpd512: 3521 case X86::BI__builtin_ia32_subps512: 3522 case X86::BI__builtin_ia32_cvtsi2sd64: 3523 case X86::BI__builtin_ia32_cvtsi2ss32: 3524 case X86::BI__builtin_ia32_cvtsi2ss64: 3525 case X86::BI__builtin_ia32_cvtusi2sd64: 3526 case X86::BI__builtin_ia32_cvtusi2ss32: 3527 case X86::BI__builtin_ia32_cvtusi2ss64: 3528 ArgNum = 2; 3529 HasRC = true; 3530 break; 3531 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3532 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3533 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3534 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3535 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3536 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 3537 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 3538 case X86::BI__builtin_ia32_cvtps2dq512_mask: 3539 case X86::BI__builtin_ia32_cvtps2qq512_mask: 3540 case X86::BI__builtin_ia32_cvtps2udq512_mask: 3541 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 3542 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 3543 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 3544 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 3545 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 3546 ArgNum = 3; 3547 HasRC = true; 3548 break; 3549 case X86::BI__builtin_ia32_addss_round_mask: 3550 case X86::BI__builtin_ia32_addsd_round_mask: 3551 case X86::BI__builtin_ia32_divss_round_mask: 3552 case X86::BI__builtin_ia32_divsd_round_mask: 3553 case X86::BI__builtin_ia32_mulss_round_mask: 3554 case X86::BI__builtin_ia32_mulsd_round_mask: 3555 case X86::BI__builtin_ia32_subss_round_mask: 3556 case X86::BI__builtin_ia32_subsd_round_mask: 3557 case X86::BI__builtin_ia32_scalefpd512_mask: 3558 case X86::BI__builtin_ia32_scalefps512_mask: 3559 case X86::BI__builtin_ia32_scalefsd_round_mask: 3560 case X86::BI__builtin_ia32_scalefss_round_mask: 3561 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 3562 case X86::BI__builtin_ia32_sqrtsd_round_mask: 3563 case X86::BI__builtin_ia32_sqrtss_round_mask: 3564 case X86::BI__builtin_ia32_vfmaddsd3_mask: 3565 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 3566 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 3567 case X86::BI__builtin_ia32_vfmaddss3_mask: 3568 case X86::BI__builtin_ia32_vfmaddss3_maskz: 3569 case X86::BI__builtin_ia32_vfmaddss3_mask3: 3570 case X86::BI__builtin_ia32_vfmaddpd512_mask: 3571 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 3572 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 3573 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 3574 case X86::BI__builtin_ia32_vfmaddps512_mask: 3575 case X86::BI__builtin_ia32_vfmaddps512_maskz: 3576 case 
X86::BI__builtin_ia32_vfmaddps512_mask3: 3577 case X86::BI__builtin_ia32_vfmsubps512_mask3: 3578 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 3579 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 3580 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 3581 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 3582 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 3583 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 3584 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 3585 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 3586 ArgNum = 4; 3587 HasRC = true; 3588 break; 3589 } 3590 3591 llvm::APSInt Result; 3592 3593 // We can't check the value of a dependent argument. 3594 Expr *Arg = TheCall->getArg(ArgNum); 3595 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3596 return false; 3597 3598 // Check constant-ness first. 3599 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3600 return true; 3601 3602 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 3603 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 3604 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 3605 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 3606 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 3607 Result == 8/*ROUND_NO_EXC*/ || 3608 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 3609 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 3610 return false; 3611 3612 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 3613 << Arg->getSourceRange(); 3614 } 3615 3616 // Check if the gather/scatter scale is legal. 3617 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 3618 CallExpr *TheCall) { 3619 unsigned ArgNum = 0; 3620 switch (BuiltinID) { 3621 default: 3622 return false; 3623 case X86::BI__builtin_ia32_gatherpfdpd: 3624 case X86::BI__builtin_ia32_gatherpfdps: 3625 case X86::BI__builtin_ia32_gatherpfqpd: 3626 case X86::BI__builtin_ia32_gatherpfqps: 3627 case X86::BI__builtin_ia32_scatterpfdpd: 3628 case X86::BI__builtin_ia32_scatterpfdps: 3629 case X86::BI__builtin_ia32_scatterpfqpd: 3630 case X86::BI__builtin_ia32_scatterpfqps: 3631 ArgNum = 3; 3632 break; 3633 case X86::BI__builtin_ia32_gatherd_pd: 3634 case X86::BI__builtin_ia32_gatherd_pd256: 3635 case X86::BI__builtin_ia32_gatherq_pd: 3636 case X86::BI__builtin_ia32_gatherq_pd256: 3637 case X86::BI__builtin_ia32_gatherd_ps: 3638 case X86::BI__builtin_ia32_gatherd_ps256: 3639 case X86::BI__builtin_ia32_gatherq_ps: 3640 case X86::BI__builtin_ia32_gatherq_ps256: 3641 case X86::BI__builtin_ia32_gatherd_q: 3642 case X86::BI__builtin_ia32_gatherd_q256: 3643 case X86::BI__builtin_ia32_gatherq_q: 3644 case X86::BI__builtin_ia32_gatherq_q256: 3645 case X86::BI__builtin_ia32_gatherd_d: 3646 case X86::BI__builtin_ia32_gatherd_d256: 3647 case X86::BI__builtin_ia32_gatherq_d: 3648 case X86::BI__builtin_ia32_gatherq_d256: 3649 case X86::BI__builtin_ia32_gather3div2df: 3650 case X86::BI__builtin_ia32_gather3div2di: 3651 case X86::BI__builtin_ia32_gather3div4df: 3652 case X86::BI__builtin_ia32_gather3div4di: 3653 case X86::BI__builtin_ia32_gather3div4sf: 3654 case X86::BI__builtin_ia32_gather3div4si: 3655 case X86::BI__builtin_ia32_gather3div8sf: 3656 case X86::BI__builtin_ia32_gather3div8si: 3657 case X86::BI__builtin_ia32_gather3siv2df: 3658 case X86::BI__builtin_ia32_gather3siv2di: 3659 case X86::BI__builtin_ia32_gather3siv4df: 3660 case X86::BI__builtin_ia32_gather3siv4di: 3661 case 
X86::BI__builtin_ia32_gather3siv4sf: 3662 case X86::BI__builtin_ia32_gather3siv4si: 3663 case X86::BI__builtin_ia32_gather3siv8sf: 3664 case X86::BI__builtin_ia32_gather3siv8si: 3665 case X86::BI__builtin_ia32_gathersiv8df: 3666 case X86::BI__builtin_ia32_gathersiv16sf: 3667 case X86::BI__builtin_ia32_gatherdiv8df: 3668 case X86::BI__builtin_ia32_gatherdiv16sf: 3669 case X86::BI__builtin_ia32_gathersiv8di: 3670 case X86::BI__builtin_ia32_gathersiv16si: 3671 case X86::BI__builtin_ia32_gatherdiv8di: 3672 case X86::BI__builtin_ia32_gatherdiv16si: 3673 case X86::BI__builtin_ia32_scatterdiv2df: 3674 case X86::BI__builtin_ia32_scatterdiv2di: 3675 case X86::BI__builtin_ia32_scatterdiv4df: 3676 case X86::BI__builtin_ia32_scatterdiv4di: 3677 case X86::BI__builtin_ia32_scatterdiv4sf: 3678 case X86::BI__builtin_ia32_scatterdiv4si: 3679 case X86::BI__builtin_ia32_scatterdiv8sf: 3680 case X86::BI__builtin_ia32_scatterdiv8si: 3681 case X86::BI__builtin_ia32_scattersiv2df: 3682 case X86::BI__builtin_ia32_scattersiv2di: 3683 case X86::BI__builtin_ia32_scattersiv4df: 3684 case X86::BI__builtin_ia32_scattersiv4di: 3685 case X86::BI__builtin_ia32_scattersiv4sf: 3686 case X86::BI__builtin_ia32_scattersiv4si: 3687 case X86::BI__builtin_ia32_scattersiv8sf: 3688 case X86::BI__builtin_ia32_scattersiv8si: 3689 case X86::BI__builtin_ia32_scattersiv8df: 3690 case X86::BI__builtin_ia32_scattersiv16sf: 3691 case X86::BI__builtin_ia32_scatterdiv8df: 3692 case X86::BI__builtin_ia32_scatterdiv16sf: 3693 case X86::BI__builtin_ia32_scattersiv8di: 3694 case X86::BI__builtin_ia32_scattersiv16si: 3695 case X86::BI__builtin_ia32_scatterdiv8di: 3696 case X86::BI__builtin_ia32_scatterdiv16si: 3697 ArgNum = 4; 3698 break; 3699 } 3700 3701 llvm::APSInt Result; 3702 3703 // We can't check the value of a dependent argument. 3704 Expr *Arg = TheCall->getArg(ArgNum); 3705 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3706 return false; 3707 3708 // Check constant-ness first. 3709 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3710 return true; 3711 3712 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 3713 return false; 3714 3715 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 3716 << Arg->getSourceRange(); 3717 } 3718 3719 enum { TileRegLow = 0, TileRegHigh = 7 }; 3720 3721 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 3722 ArrayRef<int> ArgNums) { 3723 for (int ArgNum : ArgNums) { 3724 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 3725 return true; 3726 } 3727 return false; 3728 } 3729 3730 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 3731 ArrayRef<int> ArgNums) { 3732 // Because the max number of tile register is TileRegHigh + 1, so here we use 3733 // each bit to represent the usage of them in bitset. 
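// For example, __builtin_ia32_tdpbssd(0, 1, 1), where tile register 1 is used
// twice, is diagnosed here.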
3734 std::bitset<TileRegHigh + 1> ArgValues; 3735 for (int ArgNum : ArgNums) { 3736 Expr *Arg = TheCall->getArg(ArgNum); 3737 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3738 continue; 3739 3740 llvm::APSInt Result; 3741 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3742 return true; 3743 int ArgExtValue = Result.getExtValue(); 3744 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) && 3745 "Incorrect tile register num."); 3746 if (ArgValues.test(ArgExtValue)) 3747 return Diag(TheCall->getBeginLoc(), 3748 diag::err_x86_builtin_tile_arg_duplicate) 3749 << TheCall->getArg(ArgNum)->getSourceRange(); 3750 ArgValues.set(ArgExtValue); 3751 } 3752 return false; 3753 } 3754 3755 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 3756 ArrayRef<int> ArgNums) { 3757 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 3758 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 3759 } 3760 3761 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 3762 switch (BuiltinID) { 3763 default: 3764 return false; 3765 case X86::BI__builtin_ia32_tileloadd64: 3766 case X86::BI__builtin_ia32_tileloaddt164: 3767 case X86::BI__builtin_ia32_tilestored64: 3768 case X86::BI__builtin_ia32_tilezero: 3769 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 3770 case X86::BI__builtin_ia32_tdpbssd: 3771 case X86::BI__builtin_ia32_tdpbsud: 3772 case X86::BI__builtin_ia32_tdpbusd: 3773 case X86::BI__builtin_ia32_tdpbuud: 3774 case X86::BI__builtin_ia32_tdpbf16ps: 3775 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 3776 } 3777 } 3778 static bool isX86_32Builtin(unsigned BuiltinID) { 3779 // These builtins only work on x86-32 targets. 3780 switch (BuiltinID) { 3781 case X86::BI__builtin_ia32_readeflags_u32: 3782 case X86::BI__builtin_ia32_writeeflags_u32: 3783 return true; 3784 } 3785 3786 return false; 3787 } 3788 3789 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3790 CallExpr *TheCall) { 3791 if (BuiltinID == X86::BI__builtin_cpu_supports) 3792 return SemaBuiltinCpuSupports(*this, TI, TheCall); 3793 3794 if (BuiltinID == X86::BI__builtin_cpu_is) 3795 return SemaBuiltinCpuIs(*this, TI, TheCall); 3796 3797 // Check for 32-bit only builtins on a 64-bit target. 3798 const llvm::Triple &TT = TI.getTriple(); 3799 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 3800 return Diag(TheCall->getCallee()->getBeginLoc(), 3801 diag::err_32_bit_builtin_64_bit_tgt); 3802 3803 // If the intrinsic has rounding or SAE, make sure it's valid. 3804 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 3805 return true; 3806 3807 // If the intrinsic has a gather/scatter scale immediate, make sure it's valid. 3808 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 3809 return true; 3810 3811 // If the intrinsic has tile arguments, make sure they are valid. 3812 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 3813 return true; 3814 3815 // For intrinsics which take an immediate value as part of the instruction, 3816 // range check them here.
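// i is the index of the immediate argument, and [l, u] is its allowed inclusive
// range.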
3817 int i = 0, l = 0, u = 0; 3818 switch (BuiltinID) { 3819 default: 3820 return false; 3821 case X86::BI__builtin_ia32_vec_ext_v2si: 3822 case X86::BI__builtin_ia32_vec_ext_v2di: 3823 case X86::BI__builtin_ia32_vextractf128_pd256: 3824 case X86::BI__builtin_ia32_vextractf128_ps256: 3825 case X86::BI__builtin_ia32_vextractf128_si256: 3826 case X86::BI__builtin_ia32_extract128i256: 3827 case X86::BI__builtin_ia32_extractf64x4_mask: 3828 case X86::BI__builtin_ia32_extracti64x4_mask: 3829 case X86::BI__builtin_ia32_extractf32x8_mask: 3830 case X86::BI__builtin_ia32_extracti32x8_mask: 3831 case X86::BI__builtin_ia32_extractf64x2_256_mask: 3832 case X86::BI__builtin_ia32_extracti64x2_256_mask: 3833 case X86::BI__builtin_ia32_extractf32x4_256_mask: 3834 case X86::BI__builtin_ia32_extracti32x4_256_mask: 3835 i = 1; l = 0; u = 1; 3836 break; 3837 case X86::BI__builtin_ia32_vec_set_v2di: 3838 case X86::BI__builtin_ia32_vinsertf128_pd256: 3839 case X86::BI__builtin_ia32_vinsertf128_ps256: 3840 case X86::BI__builtin_ia32_vinsertf128_si256: 3841 case X86::BI__builtin_ia32_insert128i256: 3842 case X86::BI__builtin_ia32_insertf32x8: 3843 case X86::BI__builtin_ia32_inserti32x8: 3844 case X86::BI__builtin_ia32_insertf64x4: 3845 case X86::BI__builtin_ia32_inserti64x4: 3846 case X86::BI__builtin_ia32_insertf64x2_256: 3847 case X86::BI__builtin_ia32_inserti64x2_256: 3848 case X86::BI__builtin_ia32_insertf32x4_256: 3849 case X86::BI__builtin_ia32_inserti32x4_256: 3850 i = 2; l = 0; u = 1; 3851 break; 3852 case X86::BI__builtin_ia32_vpermilpd: 3853 case X86::BI__builtin_ia32_vec_ext_v4hi: 3854 case X86::BI__builtin_ia32_vec_ext_v4si: 3855 case X86::BI__builtin_ia32_vec_ext_v4sf: 3856 case X86::BI__builtin_ia32_vec_ext_v4di: 3857 case X86::BI__builtin_ia32_extractf32x4_mask: 3858 case X86::BI__builtin_ia32_extracti32x4_mask: 3859 case X86::BI__builtin_ia32_extractf64x2_512_mask: 3860 case X86::BI__builtin_ia32_extracti64x2_512_mask: 3861 i = 1; l = 0; u = 3; 3862 break; 3863 case X86::BI_mm_prefetch: 3864 case X86::BI__builtin_ia32_vec_ext_v8hi: 3865 case X86::BI__builtin_ia32_vec_ext_v8si: 3866 i = 1; l = 0; u = 7; 3867 break; 3868 case X86::BI__builtin_ia32_sha1rnds4: 3869 case X86::BI__builtin_ia32_blendpd: 3870 case X86::BI__builtin_ia32_shufpd: 3871 case X86::BI__builtin_ia32_vec_set_v4hi: 3872 case X86::BI__builtin_ia32_vec_set_v4si: 3873 case X86::BI__builtin_ia32_vec_set_v4di: 3874 case X86::BI__builtin_ia32_shuf_f32x4_256: 3875 case X86::BI__builtin_ia32_shuf_f64x2_256: 3876 case X86::BI__builtin_ia32_shuf_i32x4_256: 3877 case X86::BI__builtin_ia32_shuf_i64x2_256: 3878 case X86::BI__builtin_ia32_insertf64x2_512: 3879 case X86::BI__builtin_ia32_inserti64x2_512: 3880 case X86::BI__builtin_ia32_insertf32x4: 3881 case X86::BI__builtin_ia32_inserti32x4: 3882 i = 2; l = 0; u = 3; 3883 break; 3884 case X86::BI__builtin_ia32_vpermil2pd: 3885 case X86::BI__builtin_ia32_vpermil2pd256: 3886 case X86::BI__builtin_ia32_vpermil2ps: 3887 case X86::BI__builtin_ia32_vpermil2ps256: 3888 i = 3; l = 0; u = 3; 3889 break; 3890 case X86::BI__builtin_ia32_cmpb128_mask: 3891 case X86::BI__builtin_ia32_cmpw128_mask: 3892 case X86::BI__builtin_ia32_cmpd128_mask: 3893 case X86::BI__builtin_ia32_cmpq128_mask: 3894 case X86::BI__builtin_ia32_cmpb256_mask: 3895 case X86::BI__builtin_ia32_cmpw256_mask: 3896 case X86::BI__builtin_ia32_cmpd256_mask: 3897 case X86::BI__builtin_ia32_cmpq256_mask: 3898 case X86::BI__builtin_ia32_cmpb512_mask: 3899 case X86::BI__builtin_ia32_cmpw512_mask: 3900 case X86::BI__builtin_ia32_cmpd512_mask: 
3901 case X86::BI__builtin_ia32_cmpq512_mask: 3902 case X86::BI__builtin_ia32_ucmpb128_mask: 3903 case X86::BI__builtin_ia32_ucmpw128_mask: 3904 case X86::BI__builtin_ia32_ucmpd128_mask: 3905 case X86::BI__builtin_ia32_ucmpq128_mask: 3906 case X86::BI__builtin_ia32_ucmpb256_mask: 3907 case X86::BI__builtin_ia32_ucmpw256_mask: 3908 case X86::BI__builtin_ia32_ucmpd256_mask: 3909 case X86::BI__builtin_ia32_ucmpq256_mask: 3910 case X86::BI__builtin_ia32_ucmpb512_mask: 3911 case X86::BI__builtin_ia32_ucmpw512_mask: 3912 case X86::BI__builtin_ia32_ucmpd512_mask: 3913 case X86::BI__builtin_ia32_ucmpq512_mask: 3914 case X86::BI__builtin_ia32_vpcomub: 3915 case X86::BI__builtin_ia32_vpcomuw: 3916 case X86::BI__builtin_ia32_vpcomud: 3917 case X86::BI__builtin_ia32_vpcomuq: 3918 case X86::BI__builtin_ia32_vpcomb: 3919 case X86::BI__builtin_ia32_vpcomw: 3920 case X86::BI__builtin_ia32_vpcomd: 3921 case X86::BI__builtin_ia32_vpcomq: 3922 case X86::BI__builtin_ia32_vec_set_v8hi: 3923 case X86::BI__builtin_ia32_vec_set_v8si: 3924 i = 2; l = 0; u = 7; 3925 break; 3926 case X86::BI__builtin_ia32_vpermilpd256: 3927 case X86::BI__builtin_ia32_roundps: 3928 case X86::BI__builtin_ia32_roundpd: 3929 case X86::BI__builtin_ia32_roundps256: 3930 case X86::BI__builtin_ia32_roundpd256: 3931 case X86::BI__builtin_ia32_getmantpd128_mask: 3932 case X86::BI__builtin_ia32_getmantpd256_mask: 3933 case X86::BI__builtin_ia32_getmantps128_mask: 3934 case X86::BI__builtin_ia32_getmantps256_mask: 3935 case X86::BI__builtin_ia32_getmantpd512_mask: 3936 case X86::BI__builtin_ia32_getmantps512_mask: 3937 case X86::BI__builtin_ia32_vec_ext_v16qi: 3938 case X86::BI__builtin_ia32_vec_ext_v16hi: 3939 i = 1; l = 0; u = 15; 3940 break; 3941 case X86::BI__builtin_ia32_pblendd128: 3942 case X86::BI__builtin_ia32_blendps: 3943 case X86::BI__builtin_ia32_blendpd256: 3944 case X86::BI__builtin_ia32_shufpd256: 3945 case X86::BI__builtin_ia32_roundss: 3946 case X86::BI__builtin_ia32_roundsd: 3947 case X86::BI__builtin_ia32_rangepd128_mask: 3948 case X86::BI__builtin_ia32_rangepd256_mask: 3949 case X86::BI__builtin_ia32_rangepd512_mask: 3950 case X86::BI__builtin_ia32_rangeps128_mask: 3951 case X86::BI__builtin_ia32_rangeps256_mask: 3952 case X86::BI__builtin_ia32_rangeps512_mask: 3953 case X86::BI__builtin_ia32_getmantsd_round_mask: 3954 case X86::BI__builtin_ia32_getmantss_round_mask: 3955 case X86::BI__builtin_ia32_vec_set_v16qi: 3956 case X86::BI__builtin_ia32_vec_set_v16hi: 3957 i = 2; l = 0; u = 15; 3958 break; 3959 case X86::BI__builtin_ia32_vec_ext_v32qi: 3960 i = 1; l = 0; u = 31; 3961 break; 3962 case X86::BI__builtin_ia32_cmpps: 3963 case X86::BI__builtin_ia32_cmpss: 3964 case X86::BI__builtin_ia32_cmppd: 3965 case X86::BI__builtin_ia32_cmpsd: 3966 case X86::BI__builtin_ia32_cmpps256: 3967 case X86::BI__builtin_ia32_cmppd256: 3968 case X86::BI__builtin_ia32_cmpps128_mask: 3969 case X86::BI__builtin_ia32_cmppd128_mask: 3970 case X86::BI__builtin_ia32_cmpps256_mask: 3971 case X86::BI__builtin_ia32_cmppd256_mask: 3972 case X86::BI__builtin_ia32_cmpps512_mask: 3973 case X86::BI__builtin_ia32_cmppd512_mask: 3974 case X86::BI__builtin_ia32_cmpsd_mask: 3975 case X86::BI__builtin_ia32_cmpss_mask: 3976 case X86::BI__builtin_ia32_vec_set_v32qi: 3977 i = 2; l = 0; u = 31; 3978 break; 3979 case X86::BI__builtin_ia32_permdf256: 3980 case X86::BI__builtin_ia32_permdi256: 3981 case X86::BI__builtin_ia32_permdf512: 3982 case X86::BI__builtin_ia32_permdi512: 3983 case X86::BI__builtin_ia32_vpermilps: 3984 case X86::BI__builtin_ia32_vpermilps256: 
3985 case X86::BI__builtin_ia32_vpermilpd512: 3986 case X86::BI__builtin_ia32_vpermilps512: 3987 case X86::BI__builtin_ia32_pshufd: 3988 case X86::BI__builtin_ia32_pshufd256: 3989 case X86::BI__builtin_ia32_pshufd512: 3990 case X86::BI__builtin_ia32_pshufhw: 3991 case X86::BI__builtin_ia32_pshufhw256: 3992 case X86::BI__builtin_ia32_pshufhw512: 3993 case X86::BI__builtin_ia32_pshuflw: 3994 case X86::BI__builtin_ia32_pshuflw256: 3995 case X86::BI__builtin_ia32_pshuflw512: 3996 case X86::BI__builtin_ia32_vcvtps2ph: 3997 case X86::BI__builtin_ia32_vcvtps2ph_mask: 3998 case X86::BI__builtin_ia32_vcvtps2ph256: 3999 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4000 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4001 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4002 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4003 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4004 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4005 case X86::BI__builtin_ia32_rndscaleps_mask: 4006 case X86::BI__builtin_ia32_rndscalepd_mask: 4007 case X86::BI__builtin_ia32_reducepd128_mask: 4008 case X86::BI__builtin_ia32_reducepd256_mask: 4009 case X86::BI__builtin_ia32_reducepd512_mask: 4010 case X86::BI__builtin_ia32_reduceps128_mask: 4011 case X86::BI__builtin_ia32_reduceps256_mask: 4012 case X86::BI__builtin_ia32_reduceps512_mask: 4013 case X86::BI__builtin_ia32_prold512: 4014 case X86::BI__builtin_ia32_prolq512: 4015 case X86::BI__builtin_ia32_prold128: 4016 case X86::BI__builtin_ia32_prold256: 4017 case X86::BI__builtin_ia32_prolq128: 4018 case X86::BI__builtin_ia32_prolq256: 4019 case X86::BI__builtin_ia32_prord512: 4020 case X86::BI__builtin_ia32_prorq512: 4021 case X86::BI__builtin_ia32_prord128: 4022 case X86::BI__builtin_ia32_prord256: 4023 case X86::BI__builtin_ia32_prorq128: 4024 case X86::BI__builtin_ia32_prorq256: 4025 case X86::BI__builtin_ia32_fpclasspd128_mask: 4026 case X86::BI__builtin_ia32_fpclasspd256_mask: 4027 case X86::BI__builtin_ia32_fpclassps128_mask: 4028 case X86::BI__builtin_ia32_fpclassps256_mask: 4029 case X86::BI__builtin_ia32_fpclassps512_mask: 4030 case X86::BI__builtin_ia32_fpclasspd512_mask: 4031 case X86::BI__builtin_ia32_fpclasssd_mask: 4032 case X86::BI__builtin_ia32_fpclassss_mask: 4033 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4034 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4035 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4036 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4037 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4038 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4039 case X86::BI__builtin_ia32_kshiftliqi: 4040 case X86::BI__builtin_ia32_kshiftlihi: 4041 case X86::BI__builtin_ia32_kshiftlisi: 4042 case X86::BI__builtin_ia32_kshiftlidi: 4043 case X86::BI__builtin_ia32_kshiftriqi: 4044 case X86::BI__builtin_ia32_kshiftrihi: 4045 case X86::BI__builtin_ia32_kshiftrisi: 4046 case X86::BI__builtin_ia32_kshiftridi: 4047 i = 1; l = 0; u = 255; 4048 break; 4049 case X86::BI__builtin_ia32_vperm2f128_pd256: 4050 case X86::BI__builtin_ia32_vperm2f128_ps256: 4051 case X86::BI__builtin_ia32_vperm2f128_si256: 4052 case X86::BI__builtin_ia32_permti256: 4053 case X86::BI__builtin_ia32_pblendw128: 4054 case X86::BI__builtin_ia32_pblendw256: 4055 case X86::BI__builtin_ia32_blendps256: 4056 case X86::BI__builtin_ia32_pblendd256: 4057 case X86::BI__builtin_ia32_palignr128: 4058 case X86::BI__builtin_ia32_palignr256: 4059 case X86::BI__builtin_ia32_palignr512: 4060 case X86::BI__builtin_ia32_alignq512: 4061 case X86::BI__builtin_ia32_alignd512: 4062 case 
X86::BI__builtin_ia32_alignd128: 4063 case X86::BI__builtin_ia32_alignd256: 4064 case X86::BI__builtin_ia32_alignq128: 4065 case X86::BI__builtin_ia32_alignq256: 4066 case X86::BI__builtin_ia32_vcomisd: 4067 case X86::BI__builtin_ia32_vcomiss: 4068 case X86::BI__builtin_ia32_shuf_f32x4: 4069 case X86::BI__builtin_ia32_shuf_f64x2: 4070 case X86::BI__builtin_ia32_shuf_i32x4: 4071 case X86::BI__builtin_ia32_shuf_i64x2: 4072 case X86::BI__builtin_ia32_shufpd512: 4073 case X86::BI__builtin_ia32_shufps: 4074 case X86::BI__builtin_ia32_shufps256: 4075 case X86::BI__builtin_ia32_shufps512: 4076 case X86::BI__builtin_ia32_dbpsadbw128: 4077 case X86::BI__builtin_ia32_dbpsadbw256: 4078 case X86::BI__builtin_ia32_dbpsadbw512: 4079 case X86::BI__builtin_ia32_vpshldd128: 4080 case X86::BI__builtin_ia32_vpshldd256: 4081 case X86::BI__builtin_ia32_vpshldd512: 4082 case X86::BI__builtin_ia32_vpshldq128: 4083 case X86::BI__builtin_ia32_vpshldq256: 4084 case X86::BI__builtin_ia32_vpshldq512: 4085 case X86::BI__builtin_ia32_vpshldw128: 4086 case X86::BI__builtin_ia32_vpshldw256: 4087 case X86::BI__builtin_ia32_vpshldw512: 4088 case X86::BI__builtin_ia32_vpshrdd128: 4089 case X86::BI__builtin_ia32_vpshrdd256: 4090 case X86::BI__builtin_ia32_vpshrdd512: 4091 case X86::BI__builtin_ia32_vpshrdq128: 4092 case X86::BI__builtin_ia32_vpshrdq256: 4093 case X86::BI__builtin_ia32_vpshrdq512: 4094 case X86::BI__builtin_ia32_vpshrdw128: 4095 case X86::BI__builtin_ia32_vpshrdw256: 4096 case X86::BI__builtin_ia32_vpshrdw512: 4097 i = 2; l = 0; u = 255; 4098 break; 4099 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4100 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4101 case X86::BI__builtin_ia32_fixupimmps512_mask: 4102 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4103 case X86::BI__builtin_ia32_fixupimmsd_mask: 4104 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4105 case X86::BI__builtin_ia32_fixupimmss_mask: 4106 case X86::BI__builtin_ia32_fixupimmss_maskz: 4107 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4108 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4109 case X86::BI__builtin_ia32_fixupimmpd256_mask: 4110 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4111 case X86::BI__builtin_ia32_fixupimmps128_mask: 4112 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4113 case X86::BI__builtin_ia32_fixupimmps256_mask: 4114 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4115 case X86::BI__builtin_ia32_pternlogd512_mask: 4116 case X86::BI__builtin_ia32_pternlogd512_maskz: 4117 case X86::BI__builtin_ia32_pternlogq512_mask: 4118 case X86::BI__builtin_ia32_pternlogq512_maskz: 4119 case X86::BI__builtin_ia32_pternlogd128_mask: 4120 case X86::BI__builtin_ia32_pternlogd128_maskz: 4121 case X86::BI__builtin_ia32_pternlogd256_mask: 4122 case X86::BI__builtin_ia32_pternlogd256_maskz: 4123 case X86::BI__builtin_ia32_pternlogq128_mask: 4124 case X86::BI__builtin_ia32_pternlogq128_maskz: 4125 case X86::BI__builtin_ia32_pternlogq256_mask: 4126 case X86::BI__builtin_ia32_pternlogq256_maskz: 4127 i = 3; l = 0; u = 255; 4128 break; 4129 case X86::BI__builtin_ia32_gatherpfdpd: 4130 case X86::BI__builtin_ia32_gatherpfdps: 4131 case X86::BI__builtin_ia32_gatherpfqpd: 4132 case X86::BI__builtin_ia32_gatherpfqps: 4133 case X86::BI__builtin_ia32_scatterpfdpd: 4134 case X86::BI__builtin_ia32_scatterpfdps: 4135 case X86::BI__builtin_ia32_scatterpfqpd: 4136 case X86::BI__builtin_ia32_scatterpfqps: 4137 i = 4; l = 2; u = 3; 4138 break; 4139 case X86::BI__builtin_ia32_reducesd_mask: 4140 case X86::BI__builtin_ia32_reducess_mask: 4141 case 
X86::BI__builtin_ia32_rndscalesd_round_mask: 4142 case X86::BI__builtin_ia32_rndscaless_round_mask: 4143 i = 4; l = 0; u = 255; 4144 break; 4145 } 4146 4147 // Note that we don't force a hard error on the range check here, allowing 4148 // template-generated or macro-generated dead code to potentially have out-of- 4149 // range values. These need to code generate, but don't necessarily need to 4150 // make any sense. We use a warning that defaults to an error. 4151 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 4152 } 4153 4154 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo 4155 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 4156 /// Returns true when the format fits the function and the FormatStringInfo has 4157 /// been populated. 4158 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 4159 FormatStringInfo *FSI) { 4160 FSI->HasVAListArg = Format->getFirstArg() == 0; 4161 FSI->FormatIdx = Format->getFormatIdx() - 1; 4162 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 4163 4164 // The way the format attribute works in GCC, the implicit this argument 4165 // of member functions is counted. However, it doesn't appear in our own 4166 // lists, so decrement format_idx in that case. 4167 if (IsCXXMember) { 4168 if (FSI->FormatIdx == 0) 4169 return false; 4170 --FSI->FormatIdx; 4171 if (FSI->FirstDataArg != 0) 4172 --FSI->FirstDataArg; 4173 } 4174 return true; 4175 } 4176 4177 /// Checks if the given expression evaluates to null. 4178 /// 4179 /// Returns true if the value evaluates to null. 4180 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 4181 // If the expression has non-null type, it doesn't evaluate to null. 4182 if (auto nullability 4183 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 4184 if (*nullability == NullabilityKind::NonNull) 4185 return false; 4186 } 4187 4188 // As a special case, transparent unions initialized with zero are 4189 // considered null for the purposes of the nonnull attribute. 4190 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 4191 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 4192 if (const CompoundLiteralExpr *CLE = 4193 dyn_cast<CompoundLiteralExpr>(Expr)) 4194 if (const InitListExpr *ILE = 4195 dyn_cast<InitListExpr>(CLE->getInitializer())) 4196 Expr = ILE->getInit(0); 4197 } 4198 4199 bool Result; 4200 return (!Expr->isValueDependent() && 4201 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 4202 !Result); 4203 } 4204 4205 static void CheckNonNullArgument(Sema &S, 4206 const Expr *ArgExpr, 4207 SourceLocation CallSiteLoc) { 4208 if (CheckNonNullExpr(S, ArgExpr)) 4209 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 4210 S.PDiag(diag::warn_null_arg) 4211 << ArgExpr->getSourceRange()); 4212 } 4213 4214 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 4215 FormatStringInfo FSI; 4216 if ((GetFormatStringType(Format) == FST_NSString) && 4217 getFormatStringInfo(Format, false, &FSI)) { 4218 Idx = FSI.FormatIdx; 4219 return true; 4220 } 4221 return false; 4222 } 4223 4224 /// Diagnose use of the %s directive in an NSString which is being passed 4225 /// as a formatting string to a formatting method.
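/// For example, a "%s" directive inside the NSString or CFString format argument
/// of such a call is flagged here.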
4226 static void 4227 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4228 const NamedDecl *FDecl, 4229 Expr **Args, 4230 unsigned NumArgs) { 4231 unsigned Idx = 0; 4232 bool Format = false; 4233 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4234 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4235 Idx = 2; 4236 Format = true; 4237 } 4238 else 4239 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4240 if (S.GetFormatNSStringIdx(I, Idx)) { 4241 Format = true; 4242 break; 4243 } 4244 } 4245 if (!Format || NumArgs <= Idx) 4246 return; 4247 const Expr *FormatExpr = Args[Idx]; 4248 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4249 FormatExpr = CSCE->getSubExpr(); 4250 const StringLiteral *FormatString; 4251 if (const ObjCStringLiteral *OSL = 4252 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4253 FormatString = OSL->getString(); 4254 else 4255 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4256 if (!FormatString) 4257 return; 4258 if (S.FormatStringHasSArg(FormatString)) { 4259 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4260 << "%s" << 1 << 1; 4261 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4262 << FDecl->getDeclName(); 4263 } 4264 } 4265 4266 /// Determine whether the given type has a non-null nullability annotation. 4267 static bool isNonNullType(ASTContext &ctx, QualType type) { 4268 if (auto nullability = type->getNullability(ctx)) 4269 return *nullability == NullabilityKind::NonNull; 4270 4271 return false; 4272 } 4273 4274 static void CheckNonNullArguments(Sema &S, 4275 const NamedDecl *FDecl, 4276 const FunctionProtoType *Proto, 4277 ArrayRef<const Expr *> Args, 4278 SourceLocation CallSiteLoc) { 4279 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4280 4281 // Already checked by by constant evaluator. 4282 if (S.isConstantEvaluated()) 4283 return; 4284 // Check the attributes attached to the method/function itself. 4285 llvm::SmallBitVector NonNullArgs; 4286 if (FDecl) { 4287 // Handle the nonnull attribute on the function/method declaration itself. 4288 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4289 if (!NonNull->args_size()) { 4290 // Easy case: all pointer arguments are nonnull. 4291 for (const auto *Arg : Args) 4292 if (S.isValidPointerAttrType(Arg->getType())) 4293 CheckNonNullArgument(S, Arg, CallSiteLoc); 4294 return; 4295 } 4296 4297 for (const ParamIdx &Idx : NonNull->args()) { 4298 unsigned IdxAST = Idx.getASTIndex(); 4299 if (IdxAST >= Args.size()) 4300 continue; 4301 if (NonNullArgs.empty()) 4302 NonNullArgs.resize(Args.size()); 4303 NonNullArgs.set(IdxAST); 4304 } 4305 } 4306 } 4307 4308 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4309 // Handle the nonnull attribute on the parameters of the 4310 // function/method. 
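// A parameter counts as non-null if it carries a nonnull attribute or if its
// type has a _Nonnull nullability annotation.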
4311 ArrayRef<ParmVarDecl*> parms; 4312 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4313 parms = FD->parameters(); 4314 else 4315 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4316 4317 unsigned ParamIndex = 0; 4318 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4319 I != E; ++I, ++ParamIndex) { 4320 const ParmVarDecl *PVD = *I; 4321 if (PVD->hasAttr<NonNullAttr>() || 4322 isNonNullType(S.Context, PVD->getType())) { 4323 if (NonNullArgs.empty()) 4324 NonNullArgs.resize(Args.size()); 4325 4326 NonNullArgs.set(ParamIndex); 4327 } 4328 } 4329 } else { 4330 // If we have a non-function, non-method declaration but no 4331 // function prototype, try to dig out the function prototype. 4332 if (!Proto) { 4333 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4334 QualType type = VD->getType().getNonReferenceType(); 4335 if (auto pointerType = type->getAs<PointerType>()) 4336 type = pointerType->getPointeeType(); 4337 else if (auto blockType = type->getAs<BlockPointerType>()) 4338 type = blockType->getPointeeType(); 4339 // FIXME: data member pointers? 4340 4341 // Dig out the function prototype, if there is one. 4342 Proto = type->getAs<FunctionProtoType>(); 4343 } 4344 } 4345 4346 // Fill in non-null argument information from the nullability 4347 // information on the parameter types (if we have them). 4348 if (Proto) { 4349 unsigned Index = 0; 4350 for (auto paramType : Proto->getParamTypes()) { 4351 if (isNonNullType(S.Context, paramType)) { 4352 if (NonNullArgs.empty()) 4353 NonNullArgs.resize(Args.size()); 4354 4355 NonNullArgs.set(Index); 4356 } 4357 4358 ++Index; 4359 } 4360 } 4361 } 4362 4363 // Check for non-null arguments. 4364 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4365 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4366 if (NonNullArgs[ArgIndex]) 4367 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4368 } 4369 } 4370 4371 /// Handles the checks for format strings, non-POD arguments to vararg 4372 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4373 /// attributes. 4374 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4375 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4376 bool IsMemberFunction, SourceLocation Loc, 4377 SourceRange Range, VariadicCallType CallType) { 4378 // FIXME: We should check as much as we can in the template definition. 4379 if (CurContext->isDependentContext()) 4380 return; 4381 4382 // Printf and scanf checking. 4383 llvm::SmallBitVector CheckedVarArgs; 4384 if (FDecl) { 4385 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4386 // Only create vector if there are format attributes. 4387 CheckedVarArgs.resize(Args.size()); 4388 4389 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4390 CheckedVarArgs); 4391 } 4392 } 4393 4394 // Refuse POD arguments that weren't caught by the format string 4395 // checks above. 4396 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4397 if (CallType != VariadicDoesNotApply && 4398 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4399 unsigned NumParams = Proto ? Proto->getNumParams() 4400 : FDecl && isa<FunctionDecl>(FDecl) 4401 ? cast<FunctionDecl>(FDecl)->getNumParams() 4402 : FDecl && isa<ObjCMethodDecl>(FDecl) 4403 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4404 : 0; 4405 4406 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4407 // Args[ArgIdx] can be null in malformed code. 
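// Arguments already covered by a format string check above (CheckedVarArgs) are
// not re-checked as variadic arguments.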
4408 if (const Expr *Arg = Args[ArgIdx]) { 4409 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4410 checkVariadicArgument(Arg, CallType); 4411 } 4412 } 4413 } 4414 4415 if (FDecl || Proto) { 4416 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4417 4418 // Type safety checking. 4419 if (FDecl) { 4420 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4421 CheckArgumentWithTypeTag(I, Args, Loc); 4422 } 4423 } 4424 4425 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4426 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4427 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4428 if (!Arg->isValueDependent()) { 4429 Expr::EvalResult Align; 4430 if (Arg->EvaluateAsInt(Align, Context)) { 4431 const llvm::APSInt &I = Align.Val.getInt(); 4432 if (!I.isPowerOf2()) 4433 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4434 << Arg->getSourceRange(); 4435 4436 if (I > Sema::MaximumAlignment) 4437 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4438 << Arg->getSourceRange() << Sema::MaximumAlignment; 4439 } 4440 } 4441 } 4442 4443 if (FD) 4444 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4445 } 4446 4447 /// CheckConstructorCall - Check a constructor call for correctness and safety 4448 /// properties not enforced by the C type system. 4449 void Sema::CheckConstructorCall(FunctionDecl *FDecl, 4450 ArrayRef<const Expr *> Args, 4451 const FunctionProtoType *Proto, 4452 SourceLocation Loc) { 4453 VariadicCallType CallType = 4454 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4455 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4456 Loc, SourceRange(), CallType); 4457 } 4458 4459 /// CheckFunctionCall - Check a direct function call for various correctness 4460 /// and safety properties not strictly enforced by the C type system. 4461 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4462 const FunctionProtoType *Proto) { 4463 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 4464 isa<CXXMethodDecl>(FDecl); 4465 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 4466 IsMemberOperatorCall; 4467 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 4468 TheCall->getCallee()); 4469 Expr** Args = TheCall->getArgs(); 4470 unsigned NumArgs = TheCall->getNumArgs(); 4471 4472 Expr *ImplicitThis = nullptr; 4473 if (IsMemberOperatorCall) { 4474 // If this is a call to a member operator, hide the first argument 4475 // from checkCall. 4476 // FIXME: Our choice of AST representation here is less than ideal. 4477 ImplicitThis = Args[0]; 4478 ++Args; 4479 --NumArgs; 4480 } else if (IsMemberFunction) 4481 ImplicitThis = 4482 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 4483 4484 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 4485 IsMemberFunction, TheCall->getRParenLoc(), 4486 TheCall->getCallee()->getSourceRange(), CallType); 4487 4488 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 4489 // None of the checks below are needed for functions that don't have 4490 // simple names (e.g., C++ conversion functions). 
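  // Note (added for clarity, not in the original source): C++ operator,
  // conversion, constructor and destructor functions have no simple
  // identifier, so FnInfo is null for them and the name-based checks below
  // are skipped.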
4491 if (!FnInfo) 4492 return false; 4493 4494 CheckAbsoluteValueFunction(TheCall, FDecl); 4495 CheckMaxUnsignedZero(TheCall, FDecl); 4496 4497 if (getLangOpts().ObjC) 4498 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 4499 4500 unsigned CMId = FDecl->getMemoryFunctionKind(); 4501 if (CMId == 0) 4502 return false; 4503 4504 // Handle memory setting and copying functions. 4505 if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) 4506 CheckStrlcpycatArguments(TheCall, FnInfo); 4507 else if (CMId == Builtin::BIstrncat) 4508 CheckStrncatArguments(TheCall, FnInfo); 4509 else 4510 CheckMemaccessArguments(TheCall, CMId, FnInfo); 4511 4512 return false; 4513 } 4514 4515 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 4516 ArrayRef<const Expr *> Args) { 4517 VariadicCallType CallType = 4518 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 4519 4520 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 4521 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 4522 CallType); 4523 4524 return false; 4525 } 4526 4527 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 4528 const FunctionProtoType *Proto) { 4529 QualType Ty; 4530 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 4531 Ty = V->getType().getNonReferenceType(); 4532 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 4533 Ty = F->getType().getNonReferenceType(); 4534 else 4535 return false; 4536 4537 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 4538 !Ty->isFunctionProtoType()) 4539 return false; 4540 4541 VariadicCallType CallType; 4542 if (!Proto || !Proto->isVariadic()) { 4543 CallType = VariadicDoesNotApply; 4544 } else if (Ty->isBlockPointerType()) { 4545 CallType = VariadicBlock; 4546 } else { // Ty->isFunctionPointerType() 4547 CallType = VariadicFunction; 4548 } 4549 4550 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 4551 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4552 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4553 TheCall->getCallee()->getSourceRange(), CallType); 4554 4555 return false; 4556 } 4557 4558 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 4559 /// such as function pointers returned from functions. 
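/// For example (illustrative, not from the original source), a call made
/// through the result of another call has no named callee and is routed here:
///
///   int (*get_handler(void))(int);
///   int r = get_handler()(42);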
4560 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 4561 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 4562 TheCall->getCallee()); 4563 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 4564 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 4565 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 4566 TheCall->getCallee()->getSourceRange(), CallType); 4567 4568 return false; 4569 } 4570 4571 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 4572 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 4573 return false; 4574 4575 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 4576 switch (Op) { 4577 case AtomicExpr::AO__c11_atomic_init: 4578 case AtomicExpr::AO__opencl_atomic_init: 4579 llvm_unreachable("There is no ordering argument for an init"); 4580 4581 case AtomicExpr::AO__c11_atomic_load: 4582 case AtomicExpr::AO__opencl_atomic_load: 4583 case AtomicExpr::AO__atomic_load_n: 4584 case AtomicExpr::AO__atomic_load: 4585 return OrderingCABI != llvm::AtomicOrderingCABI::release && 4586 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4587 4588 case AtomicExpr::AO__c11_atomic_store: 4589 case AtomicExpr::AO__opencl_atomic_store: 4590 case AtomicExpr::AO__atomic_store: 4591 case AtomicExpr::AO__atomic_store_n: 4592 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 4593 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 4594 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 4595 4596 default: 4597 return true; 4598 } 4599 } 4600 4601 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 4602 AtomicExpr::AtomicOp Op) { 4603 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 4604 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 4605 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 4606 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 4607 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 4608 Op); 4609 } 4610 4611 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 4612 SourceLocation RParenLoc, MultiExprArg Args, 4613 AtomicExpr::AtomicOp Op, 4614 AtomicArgumentOrder ArgOrder) { 4615 // All the non-OpenCL operations take one of the following forms. 4616 // The OpenCL operations take the __c11 forms with one extra argument for 4617 // synchronization scope. 
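  // For instance (illustrative, not from the original source), the OpenCL
  // flavor of a load takes one extra trailing synchronization-scope argument:
  //
  //   C __opencl_atomic_load(A *, int /*order*/, int /*scope*/)
  //
  // which is why AdjustedNumArgs is bumped by one for the __opencl builtins
  // below.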
4618 enum { 4619 // C __c11_atomic_init(A *, C) 4620 Init, 4621 4622 // C __c11_atomic_load(A *, int) 4623 Load, 4624 4625 // void __atomic_load(A *, CP, int) 4626 LoadCopy, 4627 4628 // void __atomic_store(A *, CP, int) 4629 Copy, 4630 4631 // C __c11_atomic_add(A *, M, int) 4632 Arithmetic, 4633 4634 // C __atomic_exchange_n(A *, CP, int) 4635 Xchg, 4636 4637 // void __atomic_exchange(A *, C *, CP, int) 4638 GNUXchg, 4639 4640 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 4641 C11CmpXchg, 4642 4643 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 4644 GNUCmpXchg 4645 } Form = Init; 4646 4647 const unsigned NumForm = GNUCmpXchg + 1; 4648 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 4649 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 4650 // where: 4651 // C is an appropriate type, 4652 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 4653 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 4654 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 4655 // the int parameters are for orderings. 4656 4657 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 4658 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 4659 "need to update code for modified forms"); 4660 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 4661 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 4662 AtomicExpr::AO__atomic_load, 4663 "need to update code for modified C11 atomics"); 4664 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 4665 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 4666 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 4667 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 4668 IsOpenCL; 4669 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 4670 Op == AtomicExpr::AO__atomic_store_n || 4671 Op == AtomicExpr::AO__atomic_exchange_n || 4672 Op == AtomicExpr::AO__atomic_compare_exchange_n; 4673 bool IsAddSub = false; 4674 4675 switch (Op) { 4676 case AtomicExpr::AO__c11_atomic_init: 4677 case AtomicExpr::AO__opencl_atomic_init: 4678 Form = Init; 4679 break; 4680 4681 case AtomicExpr::AO__c11_atomic_load: 4682 case AtomicExpr::AO__opencl_atomic_load: 4683 case AtomicExpr::AO__atomic_load_n: 4684 Form = Load; 4685 break; 4686 4687 case AtomicExpr::AO__atomic_load: 4688 Form = LoadCopy; 4689 break; 4690 4691 case AtomicExpr::AO__c11_atomic_store: 4692 case AtomicExpr::AO__opencl_atomic_store: 4693 case AtomicExpr::AO__atomic_store: 4694 case AtomicExpr::AO__atomic_store_n: 4695 Form = Copy; 4696 break; 4697 4698 case AtomicExpr::AO__c11_atomic_fetch_add: 4699 case AtomicExpr::AO__c11_atomic_fetch_sub: 4700 case AtomicExpr::AO__opencl_atomic_fetch_add: 4701 case AtomicExpr::AO__opencl_atomic_fetch_sub: 4702 case AtomicExpr::AO__atomic_fetch_add: 4703 case AtomicExpr::AO__atomic_fetch_sub: 4704 case AtomicExpr::AO__atomic_add_fetch: 4705 case AtomicExpr::AO__atomic_sub_fetch: 4706 IsAddSub = true; 4707 LLVM_FALLTHROUGH; 4708 case AtomicExpr::AO__c11_atomic_fetch_and: 4709 case AtomicExpr::AO__c11_atomic_fetch_or: 4710 case AtomicExpr::AO__c11_atomic_fetch_xor: 4711 case AtomicExpr::AO__opencl_atomic_fetch_and: 4712 case AtomicExpr::AO__opencl_atomic_fetch_or: 4713 case AtomicExpr::AO__opencl_atomic_fetch_xor: 4714 case AtomicExpr::AO__atomic_fetch_and: 4715 case AtomicExpr::AO__atomic_fetch_or: 4716 case AtomicExpr::AO__atomic_fetch_xor: 4717 case AtomicExpr::AO__atomic_fetch_nand: 4718 case AtomicExpr::AO__atomic_and_fetch: 4719 case AtomicExpr::AO__atomic_or_fetch: 4720 
case AtomicExpr::AO__atomic_xor_fetch: 4721 case AtomicExpr::AO__atomic_nand_fetch: 4722 case AtomicExpr::AO__c11_atomic_fetch_min: 4723 case AtomicExpr::AO__c11_atomic_fetch_max: 4724 case AtomicExpr::AO__opencl_atomic_fetch_min: 4725 case AtomicExpr::AO__opencl_atomic_fetch_max: 4726 case AtomicExpr::AO__atomic_min_fetch: 4727 case AtomicExpr::AO__atomic_max_fetch: 4728 case AtomicExpr::AO__atomic_fetch_min: 4729 case AtomicExpr::AO__atomic_fetch_max: 4730 Form = Arithmetic; 4731 break; 4732 4733 case AtomicExpr::AO__c11_atomic_exchange: 4734 case AtomicExpr::AO__opencl_atomic_exchange: 4735 case AtomicExpr::AO__atomic_exchange_n: 4736 Form = Xchg; 4737 break; 4738 4739 case AtomicExpr::AO__atomic_exchange: 4740 Form = GNUXchg; 4741 break; 4742 4743 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 4744 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 4745 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 4746 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 4747 Form = C11CmpXchg; 4748 break; 4749 4750 case AtomicExpr::AO__atomic_compare_exchange: 4751 case AtomicExpr::AO__atomic_compare_exchange_n: 4752 Form = GNUCmpXchg; 4753 break; 4754 } 4755 4756 unsigned AdjustedNumArgs = NumArgs[Form]; 4757 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 4758 ++AdjustedNumArgs; 4759 // Check we have the right number of arguments. 4760 if (Args.size() < AdjustedNumArgs) { 4761 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 4762 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4763 << ExprRange; 4764 return ExprError(); 4765 } else if (Args.size() > AdjustedNumArgs) { 4766 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 4767 diag::err_typecheck_call_too_many_args) 4768 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 4769 << ExprRange; 4770 return ExprError(); 4771 } 4772 4773 // Inspect the first argument of the atomic operation. 4774 Expr *Ptr = Args[0]; 4775 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 4776 if (ConvertedPtr.isInvalid()) 4777 return ExprError(); 4778 4779 Ptr = ConvertedPtr.get(); 4780 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 4781 if (!pointerType) { 4782 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 4783 << Ptr->getType() << Ptr->getSourceRange(); 4784 return ExprError(); 4785 } 4786 4787 // For a __c11 builtin, this should be a pointer to an _Atomic type. 4788 QualType AtomTy = pointerType->getPointeeType(); // 'A' 4789 QualType ValType = AtomTy; // 'C' 4790 if (IsC11) { 4791 if (!AtomTy->isAtomicType()) { 4792 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 4793 << Ptr->getType() << Ptr->getSourceRange(); 4794 return ExprError(); 4795 } 4796 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 4797 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 4798 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 4799 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 4800 << Ptr->getSourceRange(); 4801 return ExprError(); 4802 } 4803 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 4804 } else if (Form != Load && Form != LoadCopy) { 4805 if (ValType.isConstQualified()) { 4806 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 4807 << Ptr->getType() << Ptr->getSourceRange(); 4808 return ExprError(); 4809 } 4810 } 4811 4812 // For an arithmetic operation, the implied arithmetic must be well-formed. 
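  // Two illustrative cases (not from the original source) of what the checks
  // below accept and reject for an _Atomic(int *) object 'p':
  //
  //   __c11_atomic_fetch_add(&p, 1, __ATOMIC_SEQ_CST); // OK: pointer add/sub,
  //                                                    // operand is ptrdiff_t
  //   __c11_atomic_fetch_or(&p, 1, __ATOMIC_SEQ_CST);  // rejected: bitwise
  //                                                    // ops need an integer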
4813 if (Form == Arithmetic) { 4814 // gcc does not enforce these rules for GNU atomics, but we do so for sanity. 4815 if (IsAddSub && !ValType->isIntegerType() 4816 && !ValType->isPointerType()) { 4817 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4818 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4819 return ExprError(); 4820 } 4821 if (!IsAddSub && !ValType->isIntegerType()) { 4822 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 4823 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4824 return ExprError(); 4825 } 4826 if (IsC11 && ValType->isPointerType() && 4827 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 4828 diag::err_incomplete_type)) { 4829 return ExprError(); 4830 } 4831 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 4832 // For __atomic_*_n operations, the value type must be a scalar integral or 4833 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 4834 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 4835 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 4836 return ExprError(); 4837 } 4838 4839 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 4840 !AtomTy->isScalarType()) { 4841 // For GNU atomics, require a trivially-copyable type. This is not part of 4842 // the GNU atomics specification, but we enforce it for sanity. 4843 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 4844 << Ptr->getType() << Ptr->getSourceRange(); 4845 return ExprError(); 4846 } 4847 4848 switch (ValType.getObjCLifetime()) { 4849 case Qualifiers::OCL_None: 4850 case Qualifiers::OCL_ExplicitNone: 4851 // okay 4852 break; 4853 4854 case Qualifiers::OCL_Weak: 4855 case Qualifiers::OCL_Strong: 4856 case Qualifiers::OCL_Autoreleasing: 4857 // FIXME: Can this happen? By this point, ValType should be known 4858 // to be trivially copyable. 4859 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 4860 << ValType << Ptr->getSourceRange(); 4861 return ExprError(); 4862 } 4863 4864 // All atomic operations have an overload which takes a pointer to a volatile 4865 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 4866 // into the result or the other operands. Similarly atomic_load takes a 4867 // pointer to a const 'A'. 4868 ValType.removeLocalVolatile(); 4869 ValType.removeLocalConst(); 4870 QualType ResultType = ValType; 4871 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 4872 Form == Init) 4873 ResultType = Context.VoidTy; 4874 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 4875 ResultType = Context.BoolTy; 4876 4877 // The type of a parameter passed 'by value'. In the GNU atomics, such 4878 // arguments are actually passed as pointers. 
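  // Concretely (illustrative sketch, not from the original source), given
  // _Atomic(int) *ap and plain int *gp, tmp:
  //
  //   __c11_atomic_store(ap, 42, __ATOMIC_RELEASE);  // value operand is 'C'
  //   __atomic_store(gp, &tmp, __ATOMIC_RELEASE);    // GNU non-_n form takes
  //                                                  // a pointer, 'C *'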
4879 QualType ByValType = ValType; // 'CP' 4880 bool IsPassedByAddress = false; 4881 if (!IsC11 && !IsN) { 4882 ByValType = Ptr->getType(); 4883 IsPassedByAddress = true; 4884 } 4885 4886 SmallVector<Expr *, 5> APIOrderedArgs; 4887 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 4888 APIOrderedArgs.push_back(Args[0]); 4889 switch (Form) { 4890 case Init: 4891 case Load: 4892 APIOrderedArgs.push_back(Args[1]); // Val1/Order 4893 break; 4894 case LoadCopy: 4895 case Copy: 4896 case Arithmetic: 4897 case Xchg: 4898 APIOrderedArgs.push_back(Args[2]); // Val1 4899 APIOrderedArgs.push_back(Args[1]); // Order 4900 break; 4901 case GNUXchg: 4902 APIOrderedArgs.push_back(Args[2]); // Val1 4903 APIOrderedArgs.push_back(Args[3]); // Val2 4904 APIOrderedArgs.push_back(Args[1]); // Order 4905 break; 4906 case C11CmpXchg: 4907 APIOrderedArgs.push_back(Args[2]); // Val1 4908 APIOrderedArgs.push_back(Args[4]); // Val2 4909 APIOrderedArgs.push_back(Args[1]); // Order 4910 APIOrderedArgs.push_back(Args[3]); // OrderFail 4911 break; 4912 case GNUCmpXchg: 4913 APIOrderedArgs.push_back(Args[2]); // Val1 4914 APIOrderedArgs.push_back(Args[4]); // Val2 4915 APIOrderedArgs.push_back(Args[5]); // Weak 4916 APIOrderedArgs.push_back(Args[1]); // Order 4917 APIOrderedArgs.push_back(Args[3]); // OrderFail 4918 break; 4919 } 4920 } else 4921 APIOrderedArgs.append(Args.begin(), Args.end()); 4922 4923 // The first argument's non-CV pointer type is used to deduce the type of 4924 // subsequent arguments, except for: 4925 // - weak flag (always converted to bool) 4926 // - memory order (always converted to int) 4927 // - scope (always converted to int) 4928 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 4929 QualType Ty; 4930 if (i < NumVals[Form] + 1) { 4931 switch (i) { 4932 case 0: 4933 // The first argument is always a pointer. It has a fixed type. 4934 // It is always dereferenced, a nullptr is undefined. 4935 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4936 // Nothing else to do: we already know all we want about this pointer. 4937 continue; 4938 case 1: 4939 // The second argument is the non-atomic operand. For arithmetic, this 4940 // is always passed by value, and for a compare_exchange it is always 4941 // passed by address. For the rest, GNU uses by-address and C11 uses 4942 // by-value. 4943 assert(Form != Load); 4944 if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) 4945 Ty = ValType; 4946 else if (Form == Copy || Form == Xchg) { 4947 if (IsPassedByAddress) { 4948 // The value pointer is always dereferenced, a nullptr is undefined. 4949 CheckNonNullArgument(*this, APIOrderedArgs[i], 4950 ExprRange.getBegin()); 4951 } 4952 Ty = ByValType; 4953 } else if (Form == Arithmetic) 4954 Ty = Context.getPointerDiffType(); 4955 else { 4956 Expr *ValArg = APIOrderedArgs[i]; 4957 // The value pointer is always dereferenced, a nullptr is undefined. 4958 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 4959 LangAS AS = LangAS::Default; 4960 // Keep address space of non-atomic pointer type. 4961 if (const PointerType *PtrTy = 4962 ValArg->getType()->getAs<PointerType>()) { 4963 AS = PtrTy->getPointeeType().getAddressSpace(); 4964 } 4965 Ty = Context.getPointerType( 4966 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 4967 } 4968 break; 4969 case 2: 4970 // The third argument to compare_exchange / GNU exchange is the desired 4971 // value, either by-value (for the C11 and *_n variant) or as a pointer. 
4972 if (IsPassedByAddress) 4973 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 4974 Ty = ByValType; 4975 break; 4976 case 3: 4977 // The fourth argument to GNU compare_exchange is a 'weak' flag. 4978 Ty = Context.BoolTy; 4979 break; 4980 } 4981 } else { 4982 // The order(s) and scope are always converted to int. 4983 Ty = Context.IntTy; 4984 } 4985 4986 InitializedEntity Entity = 4987 InitializedEntity::InitializeParameter(Context, Ty, false); 4988 ExprResult Arg = APIOrderedArgs[i]; 4989 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 4990 if (Arg.isInvalid()) 4991 return true; 4992 APIOrderedArgs[i] = Arg.get(); 4993 } 4994 4995 // Permute the arguments into a 'consistent' order. 4996 SmallVector<Expr*, 5> SubExprs; 4997 SubExprs.push_back(Ptr); 4998 switch (Form) { 4999 case Init: 5000 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5001 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5002 break; 5003 case Load: 5004 SubExprs.push_back(APIOrderedArgs[1]); // Order 5005 break; 5006 case LoadCopy: 5007 case Copy: 5008 case Arithmetic: 5009 case Xchg: 5010 SubExprs.push_back(APIOrderedArgs[2]); // Order 5011 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5012 break; 5013 case GNUXchg: 5014 // Note, AtomicExpr::getVal2() has a special case for this atomic. 5015 SubExprs.push_back(APIOrderedArgs[3]); // Order 5016 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5017 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5018 break; 5019 case C11CmpXchg: 5020 SubExprs.push_back(APIOrderedArgs[3]); // Order 5021 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5022 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5023 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5024 break; 5025 case GNUCmpXchg: 5026 SubExprs.push_back(APIOrderedArgs[4]); // Order 5027 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5028 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5029 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5030 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5031 break; 5032 } 5033 5034 if (SubExprs.size() >= 2 && Form != Init) { 5035 if (Optional<llvm::APSInt> Result = 5036 SubExprs[1]->getIntegerConstantExpr(Context)) 5037 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5038 Diag(SubExprs[1]->getBeginLoc(), 5039 diag::warn_atomic_op_has_invalid_memory_order) 5040 << SubExprs[1]->getSourceRange(); 5041 } 5042 5043 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5044 auto *Scope = Args[Args.size() - 1]; 5045 if (Optional<llvm::APSInt> Result = 5046 Scope->getIntegerConstantExpr(Context)) { 5047 if (!ScopeModel->isValid(Result->getZExtValue())) 5048 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5049 << Scope->getSourceRange(); 5050 } 5051 SubExprs.push_back(Scope); 5052 } 5053 5054 AtomicExpr *AE = new (Context) 5055 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5056 5057 if ((Op == AtomicExpr::AO__c11_atomic_load || 5058 Op == AtomicExpr::AO__c11_atomic_store || 5059 Op == AtomicExpr::AO__opencl_atomic_load || 5060 Op == AtomicExpr::AO__opencl_atomic_store ) && 5061 Context.AtomicUsesUnsupportedLibcall(AE)) 5062 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5063 << ((Op == AtomicExpr::AO__c11_atomic_load || 5064 Op == AtomicExpr::AO__opencl_atomic_load) 5065 ? 
0 5066 : 1); 5067 5068 if (ValType->isExtIntType()) { 5069 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5070 return ExprError(); 5071 } 5072 5073 return AE; 5074 } 5075 5076 /// checkBuiltinArgument - Given a call to a builtin function, perform 5077 /// normal type-checking on the given argument, updating the call in 5078 /// place. This is useful when a builtin function requires custom 5079 /// type-checking for some of its arguments but not necessarily all of 5080 /// them. 5081 /// 5082 /// Returns true on error. 5083 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5084 FunctionDecl *Fn = E->getDirectCallee(); 5085 assert(Fn && "builtin call without direct callee!"); 5086 5087 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5088 InitializedEntity Entity = 5089 InitializedEntity::InitializeParameter(S.Context, Param); 5090 5091 ExprResult Arg = E->getArg(0); 5092 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5093 if (Arg.isInvalid()) 5094 return true; 5095 5096 E->setArg(ArgIndex, Arg.get()); 5097 return false; 5098 } 5099 5100 /// We have a call to a function like __sync_fetch_and_add, which is an 5101 /// overloaded function based on the pointer type of its first argument. 5102 /// The main BuildCallExpr routines have already promoted the types of 5103 /// arguments because all of these calls are prototyped as void(...). 5104 /// 5105 /// This function goes through and does final semantic checking for these 5106 /// builtins, as well as generating any warnings. 5107 ExprResult 5108 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5109 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5110 Expr *Callee = TheCall->getCallee(); 5111 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5112 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5113 5114 // Ensure that we have at least one argument to do type inference from. 5115 if (TheCall->getNumArgs() < 1) { 5116 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5117 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5118 return ExprError(); 5119 } 5120 5121 // Inspect the first argument of the atomic builtin. This should always be 5122 // a pointer type, whose element is an integral scalar or pointer type. 5123 // Because it is a pointer type, we don't have to worry about any implicit 5124 // casts here. 5125 // FIXME: We don't allow floating point scalars as input. 
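  // Typical use (illustrative, not from the original source): the pointee
  // type of the first argument drives everything that follows, e.g.
  //
  //   long Counter;
  //   __sync_fetch_and_add(&Counter, 1);  // ValType is 'long'; on an LP64
  //                                       // target this later resolves to
  //                                       // __sync_fetch_and_add_8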
5126 Expr *FirstArg = TheCall->getArg(0); 5127 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5128 if (FirstArgResult.isInvalid()) 5129 return ExprError(); 5130 FirstArg = FirstArgResult.get(); 5131 TheCall->setArg(0, FirstArg); 5132 5133 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5134 if (!pointerType) { 5135 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5136 << FirstArg->getType() << FirstArg->getSourceRange(); 5137 return ExprError(); 5138 } 5139 5140 QualType ValType = pointerType->getPointeeType(); 5141 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5142 !ValType->isBlockPointerType()) { 5143 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5144 << FirstArg->getType() << FirstArg->getSourceRange(); 5145 return ExprError(); 5146 } 5147 5148 if (ValType.isConstQualified()) { 5149 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5150 << FirstArg->getType() << FirstArg->getSourceRange(); 5151 return ExprError(); 5152 } 5153 5154 switch (ValType.getObjCLifetime()) { 5155 case Qualifiers::OCL_None: 5156 case Qualifiers::OCL_ExplicitNone: 5157 // okay 5158 break; 5159 5160 case Qualifiers::OCL_Weak: 5161 case Qualifiers::OCL_Strong: 5162 case Qualifiers::OCL_Autoreleasing: 5163 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5164 << ValType << FirstArg->getSourceRange(); 5165 return ExprError(); 5166 } 5167 5168 // Strip any qualifiers off ValType. 5169 ValType = ValType.getUnqualifiedType(); 5170 5171 // The majority of builtins return a value, but a few have special return 5172 // types, so allow them to override appropriately below. 5173 QualType ResultType = ValType; 5174 5175 // We need to figure out which concrete builtin this maps onto. For example, 5176 // __sync_fetch_and_add with a 2 byte object turns into 5177 // __sync_fetch_and_add_2. 5178 #define BUILTIN_ROW(x) \ 5179 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5180 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5181 5182 static const unsigned BuiltinIndices[][5] = { 5183 BUILTIN_ROW(__sync_fetch_and_add), 5184 BUILTIN_ROW(__sync_fetch_and_sub), 5185 BUILTIN_ROW(__sync_fetch_and_or), 5186 BUILTIN_ROW(__sync_fetch_and_and), 5187 BUILTIN_ROW(__sync_fetch_and_xor), 5188 BUILTIN_ROW(__sync_fetch_and_nand), 5189 5190 BUILTIN_ROW(__sync_add_and_fetch), 5191 BUILTIN_ROW(__sync_sub_and_fetch), 5192 BUILTIN_ROW(__sync_and_and_fetch), 5193 BUILTIN_ROW(__sync_or_and_fetch), 5194 BUILTIN_ROW(__sync_xor_and_fetch), 5195 BUILTIN_ROW(__sync_nand_and_fetch), 5196 5197 BUILTIN_ROW(__sync_val_compare_and_swap), 5198 BUILTIN_ROW(__sync_bool_compare_and_swap), 5199 BUILTIN_ROW(__sync_lock_test_and_set), 5200 BUILTIN_ROW(__sync_lock_release), 5201 BUILTIN_ROW(__sync_swap) 5202 }; 5203 #undef BUILTIN_ROW 5204 5205 // Determine the index of the size. 5206 unsigned SizeIndex; 5207 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5208 case 1: SizeIndex = 0; break; 5209 case 2: SizeIndex = 1; break; 5210 case 4: SizeIndex = 2; break; 5211 case 8: SizeIndex = 3; break; 5212 case 16: SizeIndex = 4; break; 5213 default: 5214 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5215 << FirstArg->getType() << FirstArg->getSourceRange(); 5216 return ExprError(); 5217 } 5218 5219 // Each of these builtins has one pointer argument, followed by some number of 5220 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5221 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 5222 // as the number of fixed args. 5223 unsigned BuiltinID = FDecl->getBuiltinID(); 5224 unsigned BuiltinIndex, NumFixed = 1; 5225 bool WarnAboutSemanticsChange = false; 5226 switch (BuiltinID) { 5227 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5228 case Builtin::BI__sync_fetch_and_add: 5229 case Builtin::BI__sync_fetch_and_add_1: 5230 case Builtin::BI__sync_fetch_and_add_2: 5231 case Builtin::BI__sync_fetch_and_add_4: 5232 case Builtin::BI__sync_fetch_and_add_8: 5233 case Builtin::BI__sync_fetch_and_add_16: 5234 BuiltinIndex = 0; 5235 break; 5236 5237 case Builtin::BI__sync_fetch_and_sub: 5238 case Builtin::BI__sync_fetch_and_sub_1: 5239 case Builtin::BI__sync_fetch_and_sub_2: 5240 case Builtin::BI__sync_fetch_and_sub_4: 5241 case Builtin::BI__sync_fetch_and_sub_8: 5242 case Builtin::BI__sync_fetch_and_sub_16: 5243 BuiltinIndex = 1; 5244 break; 5245 5246 case Builtin::BI__sync_fetch_and_or: 5247 case Builtin::BI__sync_fetch_and_or_1: 5248 case Builtin::BI__sync_fetch_and_or_2: 5249 case Builtin::BI__sync_fetch_and_or_4: 5250 case Builtin::BI__sync_fetch_and_or_8: 5251 case Builtin::BI__sync_fetch_and_or_16: 5252 BuiltinIndex = 2; 5253 break; 5254 5255 case Builtin::BI__sync_fetch_and_and: 5256 case Builtin::BI__sync_fetch_and_and_1: 5257 case Builtin::BI__sync_fetch_and_and_2: 5258 case Builtin::BI__sync_fetch_and_and_4: 5259 case Builtin::BI__sync_fetch_and_and_8: 5260 case Builtin::BI__sync_fetch_and_and_16: 5261 BuiltinIndex = 3; 5262 break; 5263 5264 case Builtin::BI__sync_fetch_and_xor: 5265 case Builtin::BI__sync_fetch_and_xor_1: 5266 case Builtin::BI__sync_fetch_and_xor_2: 5267 case Builtin::BI__sync_fetch_and_xor_4: 5268 case Builtin::BI__sync_fetch_and_xor_8: 5269 case Builtin::BI__sync_fetch_and_xor_16: 5270 BuiltinIndex = 4; 5271 break; 5272 5273 case Builtin::BI__sync_fetch_and_nand: 5274 case Builtin::BI__sync_fetch_and_nand_1: 5275 case Builtin::BI__sync_fetch_and_nand_2: 5276 case Builtin::BI__sync_fetch_and_nand_4: 5277 case Builtin::BI__sync_fetch_and_nand_8: 5278 case Builtin::BI__sync_fetch_and_nand_16: 5279 BuiltinIndex = 5; 5280 WarnAboutSemanticsChange = true; 5281 break; 5282 5283 case Builtin::BI__sync_add_and_fetch: 5284 case Builtin::BI__sync_add_and_fetch_1: 5285 case Builtin::BI__sync_add_and_fetch_2: 5286 case Builtin::BI__sync_add_and_fetch_4: 5287 case Builtin::BI__sync_add_and_fetch_8: 5288 case Builtin::BI__sync_add_and_fetch_16: 5289 BuiltinIndex = 6; 5290 break; 5291 5292 case Builtin::BI__sync_sub_and_fetch: 5293 case Builtin::BI__sync_sub_and_fetch_1: 5294 case Builtin::BI__sync_sub_and_fetch_2: 5295 case Builtin::BI__sync_sub_and_fetch_4: 5296 case Builtin::BI__sync_sub_and_fetch_8: 5297 case Builtin::BI__sync_sub_and_fetch_16: 5298 BuiltinIndex = 7; 5299 break; 5300 5301 case Builtin::BI__sync_and_and_fetch: 5302 case Builtin::BI__sync_and_and_fetch_1: 5303 case Builtin::BI__sync_and_and_fetch_2: 5304 case Builtin::BI__sync_and_and_fetch_4: 5305 case Builtin::BI__sync_and_and_fetch_8: 5306 case Builtin::BI__sync_and_and_fetch_16: 5307 BuiltinIndex = 8; 5308 break; 5309 5310 case Builtin::BI__sync_or_and_fetch: 5311 case Builtin::BI__sync_or_and_fetch_1: 5312 case Builtin::BI__sync_or_and_fetch_2: 5313 case Builtin::BI__sync_or_and_fetch_4: 5314 case Builtin::BI__sync_or_and_fetch_8: 5315 case Builtin::BI__sync_or_and_fetch_16: 5316 BuiltinIndex = 9; 5317 break; 5318 5319 case Builtin::BI__sync_xor_and_fetch: 5320 case Builtin::BI__sync_xor_and_fetch_1: 5321 case 
Builtin::BI__sync_xor_and_fetch_2: 5322 case Builtin::BI__sync_xor_and_fetch_4: 5323 case Builtin::BI__sync_xor_and_fetch_8: 5324 case Builtin::BI__sync_xor_and_fetch_16: 5325 BuiltinIndex = 10; 5326 break; 5327 5328 case Builtin::BI__sync_nand_and_fetch: 5329 case Builtin::BI__sync_nand_and_fetch_1: 5330 case Builtin::BI__sync_nand_and_fetch_2: 5331 case Builtin::BI__sync_nand_and_fetch_4: 5332 case Builtin::BI__sync_nand_and_fetch_8: 5333 case Builtin::BI__sync_nand_and_fetch_16: 5334 BuiltinIndex = 11; 5335 WarnAboutSemanticsChange = true; 5336 break; 5337 5338 case Builtin::BI__sync_val_compare_and_swap: 5339 case Builtin::BI__sync_val_compare_and_swap_1: 5340 case Builtin::BI__sync_val_compare_and_swap_2: 5341 case Builtin::BI__sync_val_compare_and_swap_4: 5342 case Builtin::BI__sync_val_compare_and_swap_8: 5343 case Builtin::BI__sync_val_compare_and_swap_16: 5344 BuiltinIndex = 12; 5345 NumFixed = 2; 5346 break; 5347 5348 case Builtin::BI__sync_bool_compare_and_swap: 5349 case Builtin::BI__sync_bool_compare_and_swap_1: 5350 case Builtin::BI__sync_bool_compare_and_swap_2: 5351 case Builtin::BI__sync_bool_compare_and_swap_4: 5352 case Builtin::BI__sync_bool_compare_and_swap_8: 5353 case Builtin::BI__sync_bool_compare_and_swap_16: 5354 BuiltinIndex = 13; 5355 NumFixed = 2; 5356 ResultType = Context.BoolTy; 5357 break; 5358 5359 case Builtin::BI__sync_lock_test_and_set: 5360 case Builtin::BI__sync_lock_test_and_set_1: 5361 case Builtin::BI__sync_lock_test_and_set_2: 5362 case Builtin::BI__sync_lock_test_and_set_4: 5363 case Builtin::BI__sync_lock_test_and_set_8: 5364 case Builtin::BI__sync_lock_test_and_set_16: 5365 BuiltinIndex = 14; 5366 break; 5367 5368 case Builtin::BI__sync_lock_release: 5369 case Builtin::BI__sync_lock_release_1: 5370 case Builtin::BI__sync_lock_release_2: 5371 case Builtin::BI__sync_lock_release_4: 5372 case Builtin::BI__sync_lock_release_8: 5373 case Builtin::BI__sync_lock_release_16: 5374 BuiltinIndex = 15; 5375 NumFixed = 0; 5376 ResultType = Context.VoidTy; 5377 break; 5378 5379 case Builtin::BI__sync_swap: 5380 case Builtin::BI__sync_swap_1: 5381 case Builtin::BI__sync_swap_2: 5382 case Builtin::BI__sync_swap_4: 5383 case Builtin::BI__sync_swap_8: 5384 case Builtin::BI__sync_swap_16: 5385 BuiltinIndex = 16; 5386 break; 5387 } 5388 5389 // Now that we know how many fixed arguments we expect, first check that we 5390 // have at least that many. 5391 if (TheCall->getNumArgs() < 1+NumFixed) { 5392 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5393 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5394 << Callee->getSourceRange(); 5395 return ExprError(); 5396 } 5397 5398 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5399 << Callee->getSourceRange(); 5400 5401 if (WarnAboutSemanticsChange) { 5402 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5403 << Callee->getSourceRange(); 5404 } 5405 5406 // Get the decl for the concrete builtin from this, we can tell what the 5407 // concrete integer type we should convert to is. 5408 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5409 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5410 FunctionDecl *NewBuiltinDecl; 5411 if (NewBuiltinID == BuiltinID) 5412 NewBuiltinDecl = FDecl; 5413 else { 5414 // Perform builtin lookup to avoid redeclaring it. 
5415 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 5416 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 5417 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 5418 assert(Res.getFoundDecl()); 5419 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 5420 if (!NewBuiltinDecl) 5421 return ExprError(); 5422 } 5423 5424 // The first argument --- the pointer --- has a fixed type; we 5425 // deduce the types of the rest of the arguments accordingly. Walk 5426 // the remaining arguments, converting them to the deduced value type. 5427 for (unsigned i = 0; i != NumFixed; ++i) { 5428 ExprResult Arg = TheCall->getArg(i+1); 5429 5430 // GCC does an implicit conversion to the pointer or integer ValType. This 5431 // can fail in some cases (1i -> int**), check for this error case now. 5432 // Initialize the argument. 5433 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 5434 ValType, /*consume*/ false); 5435 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5436 if (Arg.isInvalid()) 5437 return ExprError(); 5438 5439 // Okay, we have something that *can* be converted to the right type. Check 5440 // to see if there is a potentially weird extension going on here. This can 5441 // happen when you do an atomic operation on something like an char* and 5442 // pass in 42. The 42 gets converted to char. This is even more strange 5443 // for things like 45.123 -> char, etc. 5444 // FIXME: Do this check. 5445 TheCall->setArg(i+1, Arg.get()); 5446 } 5447 5448 // Create a new DeclRefExpr to refer to the new decl. 5449 DeclRefExpr *NewDRE = DeclRefExpr::Create( 5450 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 5451 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 5452 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 5453 5454 // Set the callee in the CallExpr. 5455 // FIXME: This loses syntactic information. 5456 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 5457 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 5458 CK_BuiltinFnToFnPtr); 5459 TheCall->setCallee(PromotedCall.get()); 5460 5461 // Change the result type of the call to match the original value type. This 5462 // is arbitrary, but the codegen for these builtins ins design to handle it 5463 // gracefully. 5464 TheCall->setType(ResultType); 5465 5466 // Prohibit use of _ExtInt with atomic builtins. 5467 // The arguments would have already been converted to the first argument's 5468 // type, so only need to check the first argument. 5469 const auto *ExtIntValType = ValType->getAs<ExtIntType>(); 5470 if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) { 5471 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 5472 return ExprError(); 5473 } 5474 5475 return TheCallResult; 5476 } 5477 5478 /// SemaBuiltinNontemporalOverloaded - We have a call to 5479 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 5480 /// overloaded function based on the pointer type of its last argument. 5481 /// 5482 /// This function goes through and does final semantic checking for these 5483 /// builtins. 
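///
/// Illustrative calls (not from the original source); the access type is
/// inferred from the pointer argument:
///
///   float V, *P = &V;
///   __builtin_nontemporal_store(1.0f, P);     // store form: (value, pointer)
///   float X = __builtin_nontemporal_load(P);  // load form: (pointer)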
5484 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 5485 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 5486 DeclRefExpr *DRE = 5487 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5488 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5489 unsigned BuiltinID = FDecl->getBuiltinID(); 5490 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 5491 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 5492 "Unexpected nontemporal load/store builtin!"); 5493 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 5494 unsigned numArgs = isStore ? 2 : 1; 5495 5496 // Ensure that we have the proper number of arguments. 5497 if (checkArgCount(*this, TheCall, numArgs)) 5498 return ExprError(); 5499 5500 // Inspect the last argument of the nontemporal builtin. This should always 5501 // be a pointer type, from which we imply the type of the memory access. 5502 // Because it is a pointer type, we don't have to worry about any implicit 5503 // casts here. 5504 Expr *PointerArg = TheCall->getArg(numArgs - 1); 5505 ExprResult PointerArgResult = 5506 DefaultFunctionArrayLvalueConversion(PointerArg); 5507 5508 if (PointerArgResult.isInvalid()) 5509 return ExprError(); 5510 PointerArg = PointerArgResult.get(); 5511 TheCall->setArg(numArgs - 1, PointerArg); 5512 5513 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 5514 if (!pointerType) { 5515 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 5516 << PointerArg->getType() << PointerArg->getSourceRange(); 5517 return ExprError(); 5518 } 5519 5520 QualType ValType = pointerType->getPointeeType(); 5521 5522 // Strip any qualifiers off ValType. 5523 ValType = ValType.getUnqualifiedType(); 5524 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5525 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 5526 !ValType->isVectorType()) { 5527 Diag(DRE->getBeginLoc(), 5528 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 5529 << PointerArg->getType() << PointerArg->getSourceRange(); 5530 return ExprError(); 5531 } 5532 5533 if (!isStore) { 5534 TheCall->setType(ValType); 5535 return TheCallResult; 5536 } 5537 5538 ExprResult ValArg = TheCall->getArg(0); 5539 InitializedEntity Entity = InitializedEntity::InitializeParameter( 5540 Context, ValType, /*consume*/ false); 5541 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 5542 if (ValArg.isInvalid()) 5543 return ExprError(); 5544 5545 TheCall->setArg(0, ValArg.get()); 5546 TheCall->setType(Context.VoidTy); 5547 return TheCallResult; 5548 } 5549 5550 /// CheckObjCString - Checks that the argument to the builtin 5551 /// CFString constructor is correct 5552 /// Note: It might also make sense to do the UTF-16 conversion here (would 5553 /// simplify the backend). 
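/// A typical call site (illustrative, not from the original source, assuming
/// the usual CoreFoundation typedefs):
///
///   CFStringRef S = __builtin___CFStringMakeConstantString("literal");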
bool Sema::CheckObjCString(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  if (!Literal || !Literal->isAscii()) {
    Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
        << Arg->getSourceRange();
    return true;
  }

  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
    const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
    llvm::UTF16 *ToPtr = &ToBuf[0];

    llvm::ConversionResult Result =
        llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);
    // Check for conversion failure.
    if (Result != llvm::conversionOK)
      Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
          << Arg->getSourceRange();
  }
  return false;
}

/// CheckOSLogFormatStringArg - Checks that the format string argument to the
/// os_log() and os_trace() functions is correct, and converts it to
/// const char *.
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
5630 if ((IsWindows && CC == CC_X86_64SysV) || 5631 (!IsWindows && CC == CC_Win64)) 5632 return S.Diag(Fn->getBeginLoc(), 5633 diag::err_va_start_used_in_wrong_abi_function) 5634 << !IsWindows; 5635 } 5636 return false; 5637 } 5638 5639 if (IsMSVAStart) 5640 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 5641 return false; 5642 } 5643 5644 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 5645 ParmVarDecl **LastParam = nullptr) { 5646 // Determine whether the current function, block, or obj-c method is variadic 5647 // and get its parameter list. 5648 bool IsVariadic = false; 5649 ArrayRef<ParmVarDecl *> Params; 5650 DeclContext *Caller = S.CurContext; 5651 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 5652 IsVariadic = Block->isVariadic(); 5653 Params = Block->parameters(); 5654 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 5655 IsVariadic = FD->isVariadic(); 5656 Params = FD->parameters(); 5657 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 5658 IsVariadic = MD->isVariadic(); 5659 // FIXME: This isn't correct for methods (results in bogus warning). 5660 Params = MD->parameters(); 5661 } else if (isa<CapturedDecl>(Caller)) { 5662 // We don't support va_start in a CapturedDecl. 5663 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 5664 return true; 5665 } else { 5666 // This must be some other declcontext that parses exprs. 5667 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 5668 return true; 5669 } 5670 5671 if (!IsVariadic) { 5672 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 5673 return true; 5674 } 5675 5676 if (LastParam) 5677 *LastParam = Params.empty() ? nullptr : Params.back(); 5678 5679 return false; 5680 } 5681 5682 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 5683 /// for validity. Emit an error and return true on failure; return false 5684 /// on success. 5685 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 5686 Expr *Fn = TheCall->getCallee(); 5687 5688 if (checkVAStartABI(*this, BuiltinID, Fn)) 5689 return true; 5690 5691 if (checkArgCount(*this, TheCall, 2)) 5692 return true; 5693 5694 // Type-check the first argument normally. 5695 if (checkBuiltinArgument(*this, TheCall, 0)) 5696 return true; 5697 5698 // Check that the current function is variadic, and get its last parameter. 5699 ParmVarDecl *LastParam; 5700 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 5701 return true; 5702 5703 // Verify that the second argument to the builtin is the last argument of the 5704 // current function or method. 5705 bool SecondArgIsLastNamedArgument = false; 5706 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 5707 5708 // These are valid if SecondArgIsLastNamedArgument is false after the next 5709 // block. 
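  // The classic mistake this diagnoses (illustrative, not from the original
  // source; names are made up):
  //
  //   void log_all(int Count, const char *Fmt, ...) {
  //     va_list Ap;
  //     va_start(Ap, Count); // warns: 'Count' is not the last named parameter
  //   }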
5710 QualType Type; 5711 SourceLocation ParamLoc; 5712 bool IsCRegister = false; 5713 5714 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 5715 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 5716 SecondArgIsLastNamedArgument = PV == LastParam; 5717 5718 Type = PV->getType(); 5719 ParamLoc = PV->getLocation(); 5720 IsCRegister = 5721 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 5722 } 5723 } 5724 5725 if (!SecondArgIsLastNamedArgument) 5726 Diag(TheCall->getArg(1)->getBeginLoc(), 5727 diag::warn_second_arg_of_va_start_not_last_named_param); 5728 else if (IsCRegister || Type->isReferenceType() || 5729 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 5730 // Promotable integers are UB, but enumerations need a bit of 5731 // extra checking to see what their promotable type actually is. 5732 if (!Type->isPromotableIntegerType()) 5733 return false; 5734 if (!Type->isEnumeralType()) 5735 return true; 5736 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 5737 return !(ED && 5738 Context.typesAreCompatible(ED->getPromotionType(), Type)); 5739 }()) { 5740 unsigned Reason = 0; 5741 if (Type->isReferenceType()) Reason = 1; 5742 else if (IsCRegister) Reason = 2; 5743 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 5744 Diag(ParamLoc, diag::note_parameter_type) << Type; 5745 } 5746 5747 TheCall->setType(Context.VoidTy); 5748 return false; 5749 } 5750 5751 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 5752 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 5753 // const char *named_addr); 5754 5755 Expr *Func = Call->getCallee(); 5756 5757 if (Call->getNumArgs() < 3) 5758 return Diag(Call->getEndLoc(), 5759 diag::err_typecheck_call_too_few_args_at_least) 5760 << 0 /*function call*/ << 3 << Call->getNumArgs(); 5761 5762 // Type-check the first argument normally. 5763 if (checkBuiltinArgument(*this, Call, 0)) 5764 return true; 5765 5766 // Check that the current function is variadic. 5767 if (checkVAStartIsInVariadicFunction(*this, Func)) 5768 return true; 5769 5770 // __va_start on Windows does not validate the parameter qualifiers 5771 5772 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 5773 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 5774 5775 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 5776 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 5777 5778 const QualType &ConstCharPtrTy = 5779 Context.getPointerType(Context.CharTy.withConst()); 5780 if (!Arg1Ty->isPointerType() || 5781 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 5782 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5783 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 5784 << 0 /* qualifier difference */ 5785 << 3 /* parameter mismatch */ 5786 << 2 << Arg1->getType() << ConstCharPtrTy; 5787 5788 const QualType SizeTy = Context.getSizeType(); 5789 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 5790 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 5791 << Arg2->getType() << SizeTy << 1 /* different class */ 5792 << 0 /* qualifier difference */ 5793 << 3 /* parameter mismatch */ 5794 << 3 << Arg2->getType() << SizeTy; 5795 5796 return false; 5797 } 5798 5799 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 5800 /// friends. This is declared to take (...), so we have to check everything. 
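/// For example (illustrative, not from the original source):
///
///   int A = __builtin_isgreater(1.0f, 2.0); // OK: common type is 'double'
///   int B = __builtin_isgreater(1, 2);      // diagnosed below: the common
///                                           // type 'int' is not a real
///                                           // floating type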
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and
/// friends.  This is declared to take (...), so we have to check everything.
/// We expect the last argument to be a floating point value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int.  Try all of those.
  for (unsigned i = 0; i < NumArgs - 1; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(NumArgs - 1, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
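/// For example (illustrative, not from the original source):
///
///   _Complex double Z = __builtin_complex(1.0, -0.0);
///
/// Both operands must have the same real floating type; the result is the
/// corresponding _Complex type.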
5883 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 5884 if (checkArgCount(*this, TheCall, 2)) 5885 return true; 5886 5887 bool Dependent = false; 5888 for (unsigned I = 0; I != 2; ++I) { 5889 Expr *Arg = TheCall->getArg(I); 5890 QualType T = Arg->getType(); 5891 if (T->isDependentType()) { 5892 Dependent = true; 5893 continue; 5894 } 5895 5896 // Despite supporting _Complex int, GCC requires a real floating point type 5897 // for the operands of __builtin_complex. 5898 if (!T->isRealFloatingType()) { 5899 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 5900 << Arg->getType() << Arg->getSourceRange(); 5901 } 5902 5903 ExprResult Converted = DefaultLvalueConversion(Arg); 5904 if (Converted.isInvalid()) 5905 return true; 5906 TheCall->setArg(I, Converted.get()); 5907 } 5908 5909 if (Dependent) { 5910 TheCall->setType(Context.DependentTy); 5911 return false; 5912 } 5913 5914 Expr *Real = TheCall->getArg(0); 5915 Expr *Imag = TheCall->getArg(1); 5916 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 5917 return Diag(Real->getBeginLoc(), 5918 diag::err_typecheck_call_different_arg_types) 5919 << Real->getType() << Imag->getType() 5920 << Real->getSourceRange() << Imag->getSourceRange(); 5921 } 5922 5923 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 5924 // don't allow this builtin to form those types either. 5925 // FIXME: Should we allow these types? 5926 if (Real->getType()->isFloat16Type()) 5927 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 5928 << "_Float16"; 5929 if (Real->getType()->isHalfType()) 5930 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 5931 << "half"; 5932 5933 TheCall->setType(Context.getComplexType(Real->getType())); 5934 return false; 5935 } 5936 5937 // Customized Sema Checking for VSX builtins that have the following signature: 5938 // vector [...] builtinName(vector [...], vector [...], const int); 5939 // Which takes the same type of vectors (any legal vector type) for the first 5940 // two arguments and takes compile time constant for the third argument. 5941 // Example builtins are : 5942 // vector double vec_xxpermdi(vector double, vector double, int); 5943 // vector short vec_xxsldwi(vector short, vector short, int); 5944 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 5945 unsigned ExpectedNumArgs = 3; 5946 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 5947 return true; 5948 5949 // Check the third argument is a compile time constant 5950 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 5951 return Diag(TheCall->getBeginLoc(), 5952 diag::err_vsx_builtin_nonconstant_argument) 5953 << 3 /* argument index */ << TheCall->getDirectCallee() 5954 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 5955 TheCall->getArg(2)->getEndLoc()); 5956 5957 QualType Arg1Ty = TheCall->getArg(0)->getType(); 5958 QualType Arg2Ty = TheCall->getArg(1)->getType(); 5959 5960 // Check the type of argument 1 and argument 2 are vectors. 5961 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 5962 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 5963 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 5964 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 5965 << TheCall->getDirectCallee() 5966 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5967 TheCall->getArg(1)->getEndLoc()); 5968 } 5969 5970 // Check the first two arguments are the same type. 
5971 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 5972 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 5973 << TheCall->getDirectCallee() 5974 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 5975 TheCall->getArg(1)->getEndLoc()); 5976 } 5977 5978 // When default clang type checking is turned off and the customized type 5979 // checking is used, the returning type of the function must be explicitly 5980 // set. Otherwise it is _Bool by default. 5981 TheCall->setType(Arg1Ty); 5982 5983 return false; 5984 } 5985 5986 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 5987 // This is declared to take (...), so we have to check everything. 5988 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 5989 if (TheCall->getNumArgs() < 2) 5990 return ExprError(Diag(TheCall->getEndLoc(), 5991 diag::err_typecheck_call_too_few_args_at_least) 5992 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 5993 << TheCall->getSourceRange()); 5994 5995 // Determine which of the following types of shufflevector we're checking: 5996 // 1) unary, vector mask: (lhs, mask) 5997 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 5998 QualType resType = TheCall->getArg(0)->getType(); 5999 unsigned numElements = 0; 6000 6001 if (!TheCall->getArg(0)->isTypeDependent() && 6002 !TheCall->getArg(1)->isTypeDependent()) { 6003 QualType LHSType = TheCall->getArg(0)->getType(); 6004 QualType RHSType = TheCall->getArg(1)->getType(); 6005 6006 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6007 return ExprError( 6008 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6009 << TheCall->getDirectCallee() 6010 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6011 TheCall->getArg(1)->getEndLoc())); 6012 6013 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6014 unsigned numResElements = TheCall->getNumArgs() - 2; 6015 6016 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6017 // with mask. If so, verify that RHS is an integer vector type with the 6018 // same number of elts as lhs. 6019 if (TheCall->getNumArgs() == 2) { 6020 if (!RHSType->hasIntegerRepresentation() || 6021 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6022 return ExprError(Diag(TheCall->getBeginLoc(), 6023 diag::err_vec_builtin_incompatible_vector) 6024 << TheCall->getDirectCallee() 6025 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6026 TheCall->getArg(1)->getEndLoc())); 6027 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6028 return ExprError(Diag(TheCall->getBeginLoc(), 6029 diag::err_vec_builtin_incompatible_vector) 6030 << TheCall->getDirectCallee() 6031 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6032 TheCall->getArg(1)->getEndLoc())); 6033 } else if (numElements != numResElements) { 6034 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6035 resType = Context.getVectorType(eltType, numResElements, 6036 VectorType::GenericVector); 6037 } 6038 } 6039 6040 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6041 if (TheCall->getArg(i)->isTypeDependent() || 6042 TheCall->getArg(i)->isValueDependent()) 6043 continue; 6044 6045 Optional<llvm::APSInt> Result; 6046 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6047 return ExprError(Diag(TheCall->getBeginLoc(), 6048 diag::err_shufflevector_nonconstant_argument) 6049 << TheCall->getArg(i)->getSourceRange()); 6050 6051 // Allow -1 which will be translated to undef in the IR. 
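// (Illustrative: with two 4-element vectors a and b,
//   __builtin_shufflevector(a, b, 0, 4, -1, 7)
// selects element 0 of a, element 0 of b (index 4), an undefined element,
// and element 3 of b (index 7). Example only, not taken from a test.)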
6052 if (Result->isSigned() && Result->isAllOnesValue())
6053 continue;
6054
6055 if (Result->getActiveBits() > 64 ||
6056 Result->getZExtValue() >= numElements * 2)
6057 return ExprError(Diag(TheCall->getBeginLoc(),
6058 diag::err_shufflevector_argument_too_large)
6059 << TheCall->getArg(i)->getSourceRange());
6060 }
6061
6062 SmallVector<Expr*, 32> exprs;
6063
6064 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
6065 exprs.push_back(TheCall->getArg(i));
6066 TheCall->setArg(i, nullptr);
6067 }
6068
6069 return new (Context) ShuffleVectorExpr(Context, exprs, resType,
6070 TheCall->getCallee()->getBeginLoc(),
6071 TheCall->getRParenLoc());
6072 }
6073
6074 /// SemaConvertVectorExpr - Handle __builtin_convertvector
6075 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
6076 SourceLocation BuiltinLoc,
6077 SourceLocation RParenLoc) {
6078 ExprValueKind VK = VK_RValue;
6079 ExprObjectKind OK = OK_Ordinary;
6080 QualType DstTy = TInfo->getType();
6081 QualType SrcTy = E->getType();
6082
6083 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
6084 return ExprError(Diag(BuiltinLoc,
6085 diag::err_convertvector_non_vector)
6086 << E->getSourceRange());
6087 if (!DstTy->isVectorType() && !DstTy->isDependentType())
6088 return ExprError(Diag(BuiltinLoc,
6089 diag::err_convertvector_non_vector_type));
6090
6091 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
6092 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
6093 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
6094 if (SrcElts != DstElts)
6095 return ExprError(Diag(BuiltinLoc,
6096 diag::err_convertvector_incompatible_vector)
6097 << E->getSourceRange());
6098 }
6099
6100 return new (Context)
6101 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
6102 }
6103
6104 /// SemaBuiltinPrefetch - Handle __builtin_prefetch.
6105 // This is declared to take (const void*, ...) and can take two
6106 // optional constant int args.
6107 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
6108 unsigned NumArgs = TheCall->getNumArgs();
6109
6110 if (NumArgs > 3)
6111 return Diag(TheCall->getEndLoc(),
6112 diag::err_typecheck_call_too_many_args_at_most)
6113 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
6114
6115 // Argument 0 is checked for us and the remaining arguments must be
6116 // constant integers.
6117 for (unsigned i = 1; i != NumArgs; ++i)
6118 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3))
6119 return true;
6120
6121 return false;
6122 }
6123
6124 /// SemaBuiltinAssume - Handle __assume (MS Extension).
6125 // __assume does not evaluate its arguments, and we should warn if its
6126 // argument has side effects.
6127 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
6128 Expr *Arg = TheCall->getArg(0);
6129 if (Arg->isInstantiationDependent()) return false;
6130
6131 if (Arg->HasSideEffects(Context))
6132 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
6133 << Arg->getSourceRange()
6134 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
6135
6136 return false;
6137 }
6138
6139 /// Handle __builtin_alloca_with_align. This is declared
6140 /// as (size_t, size_t) where the second size_t must be a power of 2 no
6141 /// less than 8.
6142 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
6143 // The alignment must be a constant integer.
6144 Expr *Arg = TheCall->getArg(1);
6145
6146 // We can't check the value of a dependent argument.
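// For a non-dependent argument, the checks below require the alignment to be
// a power of two, at least Context.getCharWidth(), and no larger than
// INT32_MAX; e.g. (illustrative) __builtin_alloca_with_align(n, 64) passes,
// while __builtin_alloca_with_align(n, 63) is rejected as not a power of 2.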
6147 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6148 if (const auto *UE = 6149 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6150 if (UE->getKind() == UETT_AlignOf || 6151 UE->getKind() == UETT_PreferredAlignOf) 6152 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6153 << Arg->getSourceRange(); 6154 6155 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6156 6157 if (!Result.isPowerOf2()) 6158 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6159 << Arg->getSourceRange(); 6160 6161 if (Result < Context.getCharWidth()) 6162 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6163 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6164 6165 if (Result > std::numeric_limits<int32_t>::max()) 6166 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6167 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6168 } 6169 6170 return false; 6171 } 6172 6173 /// Handle __builtin_assume_aligned. This is declared 6174 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6175 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6176 unsigned NumArgs = TheCall->getNumArgs(); 6177 6178 if (NumArgs > 3) 6179 return Diag(TheCall->getEndLoc(), 6180 diag::err_typecheck_call_too_many_args_at_most) 6181 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6182 6183 // The alignment must be a constant integer. 6184 Expr *Arg = TheCall->getArg(1); 6185 6186 // We can't check the value of a dependent argument. 6187 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6188 llvm::APSInt Result; 6189 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6190 return true; 6191 6192 if (!Result.isPowerOf2()) 6193 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6194 << Arg->getSourceRange(); 6195 6196 if (Result > Sema::MaximumAlignment) 6197 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6198 << Arg->getSourceRange() << Sema::MaximumAlignment; 6199 } 6200 6201 if (NumArgs > 2) { 6202 ExprResult Arg(TheCall->getArg(2)); 6203 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6204 Context.getSizeType(), false); 6205 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6206 if (Arg.isInvalid()) return true; 6207 TheCall->setArg(2, Arg.get()); 6208 } 6209 6210 return false; 6211 } 6212 6213 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6214 unsigned BuiltinID = 6215 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6216 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6217 6218 unsigned NumArgs = TheCall->getNumArgs(); 6219 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6220 if (NumArgs < NumRequiredArgs) { 6221 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6222 << 0 /* function call */ << NumRequiredArgs << NumArgs 6223 << TheCall->getSourceRange(); 6224 } 6225 if (NumArgs >= NumRequiredArgs + 0x100) { 6226 return Diag(TheCall->getEndLoc(), 6227 diag::err_typecheck_call_too_many_args_at_most) 6228 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6229 << TheCall->getSourceRange(); 6230 } 6231 unsigned i = 0; 6232 6233 // For formatting call, check buffer arg. 
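// (Illustrative shape of the two forms handled here, assuming the usual
// builtin signatures:
//   __builtin_os_log_format(buf, "%d", x);           // buffer, format, data
//   __builtin_os_log_format_buffer_size("%d", x);    // format, data; no buffer
// Only the non-size form has a buffer argument to check.)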
6234 if (!IsSizeCall) { 6235 ExprResult Arg(TheCall->getArg(i)); 6236 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6237 Context, Context.VoidPtrTy, false); 6238 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6239 if (Arg.isInvalid()) 6240 return true; 6241 TheCall->setArg(i, Arg.get()); 6242 i++; 6243 } 6244 6245 // Check string literal arg. 6246 unsigned FormatIdx = i; 6247 { 6248 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6249 if (Arg.isInvalid()) 6250 return true; 6251 TheCall->setArg(i, Arg.get()); 6252 i++; 6253 } 6254 6255 // Make sure variadic args are scalar. 6256 unsigned FirstDataArg = i; 6257 while (i < NumArgs) { 6258 ExprResult Arg = DefaultVariadicArgumentPromotion( 6259 TheCall->getArg(i), VariadicFunction, nullptr); 6260 if (Arg.isInvalid()) 6261 return true; 6262 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6263 if (ArgSize.getQuantity() >= 0x100) { 6264 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6265 << i << (int)ArgSize.getQuantity() << 0xff 6266 << TheCall->getSourceRange(); 6267 } 6268 TheCall->setArg(i, Arg.get()); 6269 i++; 6270 } 6271 6272 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6273 // call to avoid duplicate diagnostics. 6274 if (!IsSizeCall) { 6275 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6276 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6277 bool Success = CheckFormatArguments( 6278 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6279 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6280 CheckedVarArgs); 6281 if (!Success) 6282 return true; 6283 } 6284 6285 if (IsSizeCall) { 6286 TheCall->setType(Context.getSizeType()); 6287 } else { 6288 TheCall->setType(Context.VoidPtrTy); 6289 } 6290 return false; 6291 } 6292 6293 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6294 /// TheCall is a constant expression. 6295 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6296 llvm::APSInt &Result) { 6297 Expr *Arg = TheCall->getArg(ArgNum); 6298 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6299 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6300 6301 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6302 6303 Optional<llvm::APSInt> R; 6304 if (!(R = Arg->getIntegerConstantExpr(Context))) 6305 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6306 << FDecl->getDeclName() << Arg->getSourceRange(); 6307 Result = *R; 6308 return false; 6309 } 6310 6311 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6312 /// TheCall is a constant expression in the range [Low, High]. 6313 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6314 int Low, int High, bool RangeIsError) { 6315 if (isConstantEvaluated()) 6316 return false; 6317 llvm::APSInt Result; 6318 6319 // We can't check the value of a dependent argument. 6320 Expr *Arg = TheCall->getArg(ArgNum); 6321 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6322 return false; 6323 6324 // Check constant-ness first. 
6325 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6326 return true;
6327
6328 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
6329 if (RangeIsError)
6330 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
6331 << Result.toString(10) << Low << High << Arg->getSourceRange();
6332 else
6333 // Defer the warning until we know if the code will be emitted so that
6334 // dead code can ignore this.
6335 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
6336 PDiag(diag::warn_argument_invalid_range)
6337 << Result.toString(10) << Low << High
6338 << Arg->getSourceRange());
6339 }
6340
6341 return false;
6342 }
6343
6344 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
6345 /// TheCall is a constant expression that is a multiple of Num.
6346 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
6347 unsigned Num) {
6348 llvm::APSInt Result;
6349
6350 // We can't check the value of a dependent argument.
6351 Expr *Arg = TheCall->getArg(ArgNum);
6352 if (Arg->isTypeDependent() || Arg->isValueDependent())
6353 return false;
6354
6355 // Check constant-ness first.
6356 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6357 return true;
6358
6359 if (Result.getSExtValue() % Num != 0)
6360 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
6361 << Num << Arg->getSourceRange();
6362
6363 return false;
6364 }
6365
6366 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
6367 /// constant expression representing a power of 2.
6368 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
6369 llvm::APSInt Result;
6370
6371 // We can't check the value of a dependent argument.
6372 Expr *Arg = TheCall->getArg(ArgNum);
6373 if (Arg->isTypeDependent() || Arg->isValueDependent())
6374 return false;
6375
6376 // Check constant-ness first.
6377 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6378 return true;
6379
6380 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
6381 // and only if x is a power of 2.
6382 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
6383 return false;
6384
6385 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
6386 << Arg->getSourceRange();
6387 }
6388
6389 static bool IsShiftedByte(llvm::APSInt Value) {
6390 if (Value.isNegative())
6391 return false;
6392
6393 // Check if it's a shifted byte, by shifting it down
6394 while (true) {
6395 // If the value fits in the bottom byte, the check passes.
6396 if (Value < 0x100)
6397 return true;
6398
6399 // Otherwise, if the value has _any_ bits in the bottom byte, the check
6400 // fails.
6401 if ((Value & 0xFF) != 0)
6402 return false;
6403
6404 // If the bottom 8 bits are all 0, but something above that is nonzero,
6405 // then shifting the value right by 8 bits won't affect whether it's a
6406 // shifted byte or not. So do that, and go round again.
6407 Value >>= 8;
6408 }
6409 }
6410
6411 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
6412 /// a constant expression representing an arbitrary byte value shifted left by
6413 /// a multiple of 8 bits.
6414 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
6415 unsigned ArgBits) {
6416 llvm::APSInt Result;
6417
6418 // We can't check the value of a dependent argument.
6419 Expr *Arg = TheCall->getArg(ArgNum);
6420 if (Arg->isTypeDependent() || Arg->isValueDependent())
6421 return false;
6422
6423 // Check constant-ness first.
6424 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6425 return true;
6426
6427 // Truncate to the given size.
6428 Result = Result.getLoBits(ArgBits);
6429 Result.setIsUnsigned(true);
6430
6431 if (IsShiftedByte(Result))
6432 return false;
6433
6434 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
6435 << Arg->getSourceRange();
6436 }
6437
6438 /// SemaBuiltinConstantArgShiftedByteOrXXFF - Check if argument ArgNum of
6439 /// TheCall is a constant expression representing either a shifted byte value,
6440 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
6441 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
6442 /// Arm MVE intrinsics.
6443 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
6444 int ArgNum,
6445 unsigned ArgBits) {
6446 llvm::APSInt Result;
6447
6448 // We can't check the value of a dependent argument.
6449 Expr *Arg = TheCall->getArg(ArgNum);
6450 if (Arg->isTypeDependent() || Arg->isValueDependent())
6451 return false;
6452
6453 // Check constant-ness first.
6454 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6455 return true;
6456
6457 // Truncate to the given size.
6458 Result = Result.getLoBits(ArgBits);
6459 Result.setIsUnsigned(true);
6460
6461 // Check to see if it's in either of the required forms.
6462 if (IsShiftedByte(Result) ||
6463 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
6464 return false;
6465
6466 return Diag(TheCall->getBeginLoc(),
6467 diag::err_argument_not_shifted_byte_or_xxff)
6468 << Arg->getSourceRange();
6469 }
6470
6471 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions.
6472 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
6473 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
6474 if (checkArgCount(*this, TheCall, 2))
6475 return true;
6476 Expr *Arg0 = TheCall->getArg(0);
6477 Expr *Arg1 = TheCall->getArg(1);
6478
6479 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6480 if (FirstArg.isInvalid())
6481 return true;
6482 QualType FirstArgType = FirstArg.get()->getType();
6483 if (!FirstArgType->isAnyPointerType())
6484 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6485 << "first" << FirstArgType << Arg0->getSourceRange();
6486 TheCall->setArg(0, FirstArg.get());
6487
6488 ExprResult SecArg = DefaultLvalueConversion(Arg1);
6489 if (SecArg.isInvalid())
6490 return true;
6491 QualType SecArgType = SecArg.get()->getType();
6492 if (!SecArgType->isIntegerType())
6493 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6494 << "second" << SecArgType << Arg1->getSourceRange();
6495
6496 // Derive the return type from the pointer argument.
6497 TheCall->setType(FirstArgType);
6498 return false;
6499 }
6500
6501 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
6502 if (checkArgCount(*this, TheCall, 2))
6503 return true;
6504
6505 Expr *Arg0 = TheCall->getArg(0);
6506 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6507 if (FirstArg.isInvalid())
6508 return true;
6509 QualType FirstArgType = FirstArg.get()->getType();
6510 if (!FirstArgType->isAnyPointerType())
6511 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6512 << "first" << FirstArgType << Arg0->getSourceRange();
6513 TheCall->setArg(0, FirstArg.get());
6514
6515 // Derive the return type from the pointer argument.
6516 TheCall->setType(FirstArgType);
6517
6518 // Second arg must be a constant in the range [0,15].
6519 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
6520 }
6521
6522 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
6523 if (checkArgCount(*this, TheCall, 2))
6524 return true;
6525 Expr *Arg0 = TheCall->getArg(0);
6526 Expr *Arg1 = TheCall->getArg(1);
6527
6528 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6529 if (FirstArg.isInvalid())
6530 return true;
6531 QualType FirstArgType = FirstArg.get()->getType();
6532 if (!FirstArgType->isAnyPointerType())
6533 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6534 << "first" << FirstArgType << Arg0->getSourceRange();
6535
6536 QualType SecArgType = Arg1->getType();
6537 if (!SecArgType->isIntegerType())
6538 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
6539 << "second" << SecArgType << Arg1->getSourceRange();
6540 TheCall->setType(Context.IntTy);
6541 return false;
6542 }
6543
6544 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
6545 BuiltinID == AArch64::BI__builtin_arm_stg) {
6546 if (checkArgCount(*this, TheCall, 1))
6547 return true;
6548 Expr *Arg0 = TheCall->getArg(0);
6549 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
6550 if (FirstArg.isInvalid())
6551 return true;
6552
6553 QualType FirstArgType = FirstArg.get()->getType();
6554 if (!FirstArgType->isAnyPointerType())
6555 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
6556 << "first" << FirstArgType << Arg0->getSourceRange();
6557 TheCall->setArg(0, FirstArg.get());
6558
6559 // Derive the return type from the pointer argument.
6560 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 6561 TheCall->setType(FirstArgType); 6562 return false; 6563 } 6564 6565 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 6566 Expr *ArgA = TheCall->getArg(0); 6567 Expr *ArgB = TheCall->getArg(1); 6568 6569 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 6570 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 6571 6572 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 6573 return true; 6574 6575 QualType ArgTypeA = ArgExprA.get()->getType(); 6576 QualType ArgTypeB = ArgExprB.get()->getType(); 6577 6578 auto isNull = [&] (Expr *E) -> bool { 6579 return E->isNullPointerConstant( 6580 Context, Expr::NPC_ValueDependentIsNotNull); }; 6581 6582 // argument should be either a pointer or null 6583 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 6584 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6585 << "first" << ArgTypeA << ArgA->getSourceRange(); 6586 6587 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 6588 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 6589 << "second" << ArgTypeB << ArgB->getSourceRange(); 6590 6591 // Ensure Pointee types are compatible 6592 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 6593 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 6594 QualType pointeeA = ArgTypeA->getPointeeType(); 6595 QualType pointeeB = ArgTypeB->getPointeeType(); 6596 if (!Context.typesAreCompatible( 6597 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 6598 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 6599 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 6600 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 6601 << ArgB->getSourceRange(); 6602 } 6603 } 6604 6605 // at least one argument should be pointer type 6606 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 6607 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 6608 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 6609 6610 if (isNull(ArgA)) // adopt type of the other pointer 6611 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 6612 6613 if (isNull(ArgB)) 6614 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 6615 6616 TheCall->setArg(0, ArgExprA.get()); 6617 TheCall->setArg(1, ArgExprB.get()); 6618 TheCall->setType(Context.LongLongTy); 6619 return false; 6620 } 6621 assert(false && "Unhandled ARM MTE intrinsic"); 6622 return true; 6623 } 6624 6625 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 6626 /// TheCall is an ARM/AArch64 special register string literal. 
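/// Illustrative arguments (examples only, not exhaustive):
///   __builtin_arm_rsr("cp15:0:c13:c0:3")  // encoded coprocessor form, ARM
///   __builtin_arm_rsr64("tpidr_el0")      // plain register name (AllowName)
/// The field count and field ranges are validated below.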
6627 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
6628 int ArgNum, unsigned ExpectedFieldNum,
6629 bool AllowName) {
6630 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6631 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6632 BuiltinID == ARM::BI__builtin_arm_rsr ||
6633 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6634 BuiltinID == ARM::BI__builtin_arm_wsr ||
6635 BuiltinID == ARM::BI__builtin_arm_wsrp;
6636 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
6637 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
6638 BuiltinID == AArch64::BI__builtin_arm_rsr ||
6639 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
6640 BuiltinID == AArch64::BI__builtin_arm_wsr ||
6641 BuiltinID == AArch64::BI__builtin_arm_wsrp;
6642 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
6643
6644 // We can't check the value of a dependent argument.
6645 Expr *Arg = TheCall->getArg(ArgNum);
6646 if (Arg->isTypeDependent() || Arg->isValueDependent())
6647 return false;
6648
6649 // Check if the argument is a string literal.
6650 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
6651 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
6652 << Arg->getSourceRange();
6653
6654 // Check the type of special register given.
6655 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
6656 SmallVector<StringRef, 6> Fields;
6657 Reg.split(Fields, ":");
6658
6659 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
6660 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
6661 << Arg->getSourceRange();
6662
6663 // If the string is the name of a register then we cannot check that it is
6664 // valid here but if the string is of one of the forms described in ACLE then we
6665 // can check that the supplied fields are integers and within the valid
6666 // ranges.
6667 if (Fields.size() > 1) {
6668 bool FiveFields = Fields.size() == 5;
6669
6670 bool ValidString = true;
6671 if (IsARMBuiltin) {
6672 ValidString &= Fields[0].startswith_lower("cp") ||
6673 Fields[0].startswith_lower("p");
6674 if (ValidString)
6675 Fields[0] =
6676 Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
6677
6678 ValidString &= Fields[2].startswith_lower("c");
6679 if (ValidString)
6680 Fields[2] = Fields[2].drop_front(1);
6681
6682 if (FiveFields) {
6683 ValidString &= Fields[3].startswith_lower("c");
6684 if (ValidString)
6685 Fields[3] = Fields[3].drop_front(1);
6686 }
6687 }
6688
6689 SmallVector<int, 5> Ranges;
6690 if (FiveFields)
6691 Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
6692 else
6693 Ranges.append({15, 7, 15});
6694
6695 for (unsigned i=0; i<Fields.size(); ++i) {
6696 int IntField;
6697 ValidString &= !Fields[i].getAsInteger(10, IntField);
6698 ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
6699 }
6700
6701 if (!ValidString)
6702 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
6703 << Arg->getSourceRange();
6704 } else if (IsAArch64Builtin && Fields.size() == 1) {
6705 // If the register name is one of those that appear in the condition below
6706 // and the special register builtin being used is one of the write builtins,
6707 // then we require that the argument provided for writing to the register
6708 // is an integer constant expression. This is because it will be lowered to
6709 // an MSR (immediate) instruction, so we need to know the immediate at
6710 // compile time.
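// (Illustrative: a write such as __builtin_arm_wsr("spsel", 1) must supply a
// constant in [0, 15]; other register names and the read builtins are not
// constrained here.)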
6711 if (TheCall->getNumArgs() != 2) 6712 return false; 6713 6714 std::string RegLower = Reg.lower(); 6715 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 6716 RegLower != "pan" && RegLower != "uao") 6717 return false; 6718 6719 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 6720 } 6721 6722 return false; 6723 } 6724 6725 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 6726 /// This checks that the target supports __builtin_longjmp and 6727 /// that val is a constant 1. 6728 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 6729 if (!Context.getTargetInfo().hasSjLjLowering()) 6730 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 6731 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6732 6733 Expr *Arg = TheCall->getArg(1); 6734 llvm::APSInt Result; 6735 6736 // TODO: This is less than ideal. Overload this to take a value. 6737 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6738 return true; 6739 6740 if (Result != 1) 6741 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 6742 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 6743 6744 return false; 6745 } 6746 6747 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 6748 /// This checks that the target supports __builtin_setjmp. 6749 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 6750 if (!Context.getTargetInfo().hasSjLjLowering()) 6751 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 6752 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6753 return false; 6754 } 6755 6756 namespace { 6757 6758 class UncoveredArgHandler { 6759 enum { Unknown = -1, AllCovered = -2 }; 6760 6761 signed FirstUncoveredArg = Unknown; 6762 SmallVector<const Expr *, 4> DiagnosticExprs; 6763 6764 public: 6765 UncoveredArgHandler() = default; 6766 6767 bool hasUncoveredArg() const { 6768 return (FirstUncoveredArg >= 0); 6769 } 6770 6771 unsigned getUncoveredArg() const { 6772 assert(hasUncoveredArg() && "no uncovered argument"); 6773 return FirstUncoveredArg; 6774 } 6775 6776 void setAllCovered() { 6777 // A string has been found with all arguments covered, so clear out 6778 // the diagnostics. 6779 DiagnosticExprs.clear(); 6780 FirstUncoveredArg = AllCovered; 6781 } 6782 6783 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 6784 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 6785 6786 // Don't update if a previous string covers all arguments. 6787 if (FirstUncoveredArg == AllCovered) 6788 return; 6789 6790 // UncoveredArgHandler tracks the highest uncovered argument index 6791 // and with it all the strings that match this index. 6792 if (NewFirstUncoveredArg == FirstUncoveredArg) 6793 DiagnosticExprs.push_back(StrExpr); 6794 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 6795 DiagnosticExprs.clear(); 6796 DiagnosticExprs.push_back(StrExpr); 6797 FirstUncoveredArg = NewFirstUncoveredArg; 6798 } 6799 } 6800 6801 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 6802 }; 6803 6804 enum StringLiteralCheckType { 6805 SLCT_NotALiteral, 6806 SLCT_UncheckedLiteral, 6807 SLCT_CheckedLiteral 6808 }; 6809 6810 } // namespace 6811 6812 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 6813 BinaryOperatorKind BinOpKind, 6814 bool AddendIsRight) { 6815 unsigned BitWidth = Offset.getBitWidth(); 6816 unsigned AddendBitWidth = Addend.getBitWidth(); 6817 // There might be negative interim results. 
6818 if (Addend.isUnsigned()) { 6819 Addend = Addend.zext(++AddendBitWidth); 6820 Addend.setIsSigned(true); 6821 } 6822 // Adjust the bit width of the APSInts. 6823 if (AddendBitWidth > BitWidth) { 6824 Offset = Offset.sext(AddendBitWidth); 6825 BitWidth = AddendBitWidth; 6826 } else if (BitWidth > AddendBitWidth) { 6827 Addend = Addend.sext(BitWidth); 6828 } 6829 6830 bool Ov = false; 6831 llvm::APSInt ResOffset = Offset; 6832 if (BinOpKind == BO_Add) 6833 ResOffset = Offset.sadd_ov(Addend, Ov); 6834 else { 6835 assert(AddendIsRight && BinOpKind == BO_Sub && 6836 "operator must be add or sub with addend on the right"); 6837 ResOffset = Offset.ssub_ov(Addend, Ov); 6838 } 6839 6840 // We add an offset to a pointer here so we should support an offset as big as 6841 // possible. 6842 if (Ov) { 6843 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 6844 "index (intermediate) result too big"); 6845 Offset = Offset.sext(2 * BitWidth); 6846 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 6847 return; 6848 } 6849 6850 Offset = ResOffset; 6851 } 6852 6853 namespace { 6854 6855 // This is a wrapper class around StringLiteral to support offsetted string 6856 // literals as format strings. It takes the offset into account when returning 6857 // the string and its length or the source locations to display notes correctly. 6858 class FormatStringLiteral { 6859 const StringLiteral *FExpr; 6860 int64_t Offset; 6861 6862 public: 6863 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 6864 : FExpr(fexpr), Offset(Offset) {} 6865 6866 StringRef getString() const { 6867 return FExpr->getString().drop_front(Offset); 6868 } 6869 6870 unsigned getByteLength() const { 6871 return FExpr->getByteLength() - getCharByteWidth() * Offset; 6872 } 6873 6874 unsigned getLength() const { return FExpr->getLength() - Offset; } 6875 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 6876 6877 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 6878 6879 QualType getType() const { return FExpr->getType(); } 6880 6881 bool isAscii() const { return FExpr->isAscii(); } 6882 bool isWide() const { return FExpr->isWide(); } 6883 bool isUTF8() const { return FExpr->isUTF8(); } 6884 bool isUTF16() const { return FExpr->isUTF16(); } 6885 bool isUTF32() const { return FExpr->isUTF32(); } 6886 bool isPascal() const { return FExpr->isPascal(); } 6887 6888 SourceLocation getLocationOfByte( 6889 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 6890 const TargetInfo &Target, unsigned *StartToken = nullptr, 6891 unsigned *StartTokenByteOffset = nullptr) const { 6892 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 6893 StartToken, StartTokenByteOffset); 6894 } 6895 6896 SourceLocation getBeginLoc() const LLVM_READONLY { 6897 return FExpr->getBeginLoc().getLocWithOffset(Offset); 6898 } 6899 6900 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 6901 }; 6902 6903 } // namespace 6904 6905 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 6906 const Expr *OrigFormatExpr, 6907 ArrayRef<const Expr *> Args, 6908 bool HasVAListArg, unsigned format_idx, 6909 unsigned firstDataArg, 6910 Sema::FormatStringType Type, 6911 bool inFunctionCall, 6912 Sema::VariadicCallType CallType, 6913 llvm::SmallBitVector &CheckedVarArgs, 6914 UncoveredArgHandler &UncoveredArg, 6915 bool IgnoreStringsWithoutSpecifiers); 6916 6917 // Determine if an expression is a string literal or constant string. 
6918 // If this function returns false on the arguments to a function expecting a 6919 // format string, we will usually need to emit a warning. 6920 // True string literals are then checked by CheckFormatString. 6921 static StringLiteralCheckType 6922 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 6923 bool HasVAListArg, unsigned format_idx, 6924 unsigned firstDataArg, Sema::FormatStringType Type, 6925 Sema::VariadicCallType CallType, bool InFunctionCall, 6926 llvm::SmallBitVector &CheckedVarArgs, 6927 UncoveredArgHandler &UncoveredArg, 6928 llvm::APSInt Offset, 6929 bool IgnoreStringsWithoutSpecifiers = false) { 6930 if (S.isConstantEvaluated()) 6931 return SLCT_NotALiteral; 6932 tryAgain: 6933 assert(Offset.isSigned() && "invalid offset"); 6934 6935 if (E->isTypeDependent() || E->isValueDependent()) 6936 return SLCT_NotALiteral; 6937 6938 E = E->IgnoreParenCasts(); 6939 6940 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 6941 // Technically -Wformat-nonliteral does not warn about this case. 6942 // The behavior of printf and friends in this case is implementation 6943 // dependent. Ideally if the format string cannot be null then 6944 // it should have a 'nonnull' attribute in the function prototype. 6945 return SLCT_UncheckedLiteral; 6946 6947 switch (E->getStmtClass()) { 6948 case Stmt::BinaryConditionalOperatorClass: 6949 case Stmt::ConditionalOperatorClass: { 6950 // The expression is a literal if both sub-expressions were, and it was 6951 // completely checked only if both sub-expressions were checked. 6952 const AbstractConditionalOperator *C = 6953 cast<AbstractConditionalOperator>(E); 6954 6955 // Determine whether it is necessary to check both sub-expressions, for 6956 // example, because the condition expression is a constant that can be 6957 // evaluated at compile time. 6958 bool CheckLeft = true, CheckRight = true; 6959 6960 bool Cond; 6961 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 6962 S.isConstantEvaluated())) { 6963 if (Cond) 6964 CheckRight = false; 6965 else 6966 CheckLeft = false; 6967 } 6968 6969 // We need to maintain the offsets for the right and the left hand side 6970 // separately to check if every possible indexed expression is a valid 6971 // string literal. They might have different offsets for different string 6972 // literals in the end. 6973 StringLiteralCheckType Left; 6974 if (!CheckLeft) 6975 Left = SLCT_UncheckedLiteral; 6976 else { 6977 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 6978 HasVAListArg, format_idx, firstDataArg, 6979 Type, CallType, InFunctionCall, 6980 CheckedVarArgs, UncoveredArg, Offset, 6981 IgnoreStringsWithoutSpecifiers); 6982 if (Left == SLCT_NotALiteral || !CheckRight) { 6983 return Left; 6984 } 6985 } 6986 6987 StringLiteralCheckType Right = checkFormatStringExpr( 6988 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 6989 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 6990 IgnoreStringsWithoutSpecifiers); 6991 6992 return (CheckLeft && Left < Right) ? 
Left : Right; 6993 } 6994 6995 case Stmt::ImplicitCastExprClass: 6996 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 6997 goto tryAgain; 6998 6999 case Stmt::OpaqueValueExprClass: 7000 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 7001 E = src; 7002 goto tryAgain; 7003 } 7004 return SLCT_NotALiteral; 7005 7006 case Stmt::PredefinedExprClass: 7007 // While __func__, etc., are technically not string literals, they 7008 // cannot contain format specifiers and thus are not a security 7009 // liability. 7010 return SLCT_UncheckedLiteral; 7011 7012 case Stmt::DeclRefExprClass: { 7013 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7014 7015 // As an exception, do not flag errors for variables binding to 7016 // const string literals. 7017 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7018 bool isConstant = false; 7019 QualType T = DR->getType(); 7020 7021 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7022 isConstant = AT->getElementType().isConstant(S.Context); 7023 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7024 isConstant = T.isConstant(S.Context) && 7025 PT->getPointeeType().isConstant(S.Context); 7026 } else if (T->isObjCObjectPointerType()) { 7027 // In ObjC, there is usually no "const ObjectPointer" type, 7028 // so don't check if the pointee type is constant. 7029 isConstant = T.isConstant(S.Context); 7030 } 7031 7032 if (isConstant) { 7033 if (const Expr *Init = VD->getAnyInitializer()) { 7034 // Look through initializers like const char c[] = { "foo" } 7035 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7036 if (InitList->isStringLiteralInit()) 7037 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7038 } 7039 return checkFormatStringExpr(S, Init, Args, 7040 HasVAListArg, format_idx, 7041 firstDataArg, Type, CallType, 7042 /*InFunctionCall*/ false, CheckedVarArgs, 7043 UncoveredArg, Offset); 7044 } 7045 } 7046 7047 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7048 // special check to see if the format string is a function parameter 7049 // of the function calling the printf function. If the function 7050 // has an attribute indicating it is a printf-like function, then we 7051 // should suppress warnings concerning non-literals being used in a call 7052 // to a vprintf function. For example: 7053 // 7054 // void 7055 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7056 // va_list ap; 7057 // va_start(ap, fmt); 7058 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7059 // ... 7060 // } 7061 if (HasVAListArg) { 7062 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7063 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 7064 int PVIndex = PV->getFunctionScopeIndex() + 1; 7065 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 7066 // adjust for implicit parameter 7067 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 7068 if (MD->isInstance()) 7069 ++PVIndex; 7070 // We also check if the formats are compatible. 7071 // We can't pass a 'scanf' string to a 'printf' function. 
7072 if (PVIndex == PVFormat->getFormatIdx() &&
7073 Type == S.GetFormatStringType(PVFormat))
7074 return SLCT_UncheckedLiteral;
7075 }
7076 }
7077 }
7078 }
7079 }
7080
7081 return SLCT_NotALiteral;
7082 }
7083
7084 case Stmt::CallExprClass:
7085 case Stmt::CXXMemberCallExprClass: {
7086 const CallExpr *CE = cast<CallExpr>(E);
7087 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
7088 bool IsFirst = true;
7089 StringLiteralCheckType CommonResult;
7090 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
7091 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
7092 StringLiteralCheckType Result = checkFormatStringExpr(
7093 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7094 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7095 IgnoreStringsWithoutSpecifiers);
7096 if (IsFirst) {
7097 CommonResult = Result;
7098 IsFirst = false;
7099 }
7100 }
7101 if (!IsFirst)
7102 return CommonResult;
7103
7104 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
7105 unsigned BuiltinID = FD->getBuiltinID();
7106 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
7107 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
7108 const Expr *Arg = CE->getArg(0);
7109 return checkFormatStringExpr(S, Arg, Args,
7110 HasVAListArg, format_idx,
7111 firstDataArg, Type, CallType,
7112 InFunctionCall, CheckedVarArgs,
7113 UncoveredArg, Offset,
7114 IgnoreStringsWithoutSpecifiers);
7115 }
7116 }
7117 }
7118
7119 return SLCT_NotALiteral;
7120 }
7121 case Stmt::ObjCMessageExprClass: {
7122 const auto *ME = cast<ObjCMessageExpr>(E);
7123 if (const auto *MD = ME->getMethodDecl()) {
7124 if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
7125 // As a special case heuristic, if we're using the method -[NSBundle
7126 // localizedStringForKey:value:table:], ignore any key strings that lack
7127 // format specifiers. The idea is that if the key doesn't have any
7128 // format specifiers then it's probably just a key to map to the
7129 // localized strings. If it does have format specifiers though, then it's
7130 // likely that the text of the key is the format string in the
7131 // programmer's language, and should be checked.
7132 const ObjCInterfaceDecl *IFace;
7133 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
7134 IFace->getIdentifier()->isStr("NSBundle") &&
7135 MD->getSelector().isKeywordSelector(
7136 {"localizedStringForKey", "value", "table"})) {
7137 IgnoreStringsWithoutSpecifiers = true;
7138 }
7139
7140 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
7141 return checkFormatStringExpr(
7142 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
7143 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
7144 IgnoreStringsWithoutSpecifiers);
7145 }
7146 }
7147
7148 return SLCT_NotALiteral;
7149 }
7150 case Stmt::ObjCStringLiteralClass:
7151 case Stmt::StringLiteralClass: {
7152 const StringLiteral *StrE = nullptr;
7153
7154 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
7155 StrE = ObjCFExpr->getString();
7156 else
7157 StrE = cast<StringLiteral>(E);
7158
7159 if (StrE) {
7160 if (Offset.isNegative() || Offset > StrE->getLength()) {
7161 // TODO: It would be better to have an explicit warning for out of
7162 // bounds literals.
7163 return SLCT_NotALiteral; 7164 } 7165 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 7166 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 7167 firstDataArg, Type, InFunctionCall, CallType, 7168 CheckedVarArgs, UncoveredArg, 7169 IgnoreStringsWithoutSpecifiers); 7170 return SLCT_CheckedLiteral; 7171 } 7172 7173 return SLCT_NotALiteral; 7174 } 7175 case Stmt::BinaryOperatorClass: { 7176 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 7177 7178 // A string literal + an int offset is still a string literal. 7179 if (BinOp->isAdditiveOp()) { 7180 Expr::EvalResult LResult, RResult; 7181 7182 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7183 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7184 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7185 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7186 7187 if (LIsInt != RIsInt) { 7188 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7189 7190 if (LIsInt) { 7191 if (BinOpKind == BO_Add) { 7192 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7193 E = BinOp->getRHS(); 7194 goto tryAgain; 7195 } 7196 } else { 7197 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7198 E = BinOp->getLHS(); 7199 goto tryAgain; 7200 } 7201 } 7202 } 7203 7204 return SLCT_NotALiteral; 7205 } 7206 case Stmt::UnaryOperatorClass: { 7207 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7208 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7209 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7210 Expr::EvalResult IndexResult; 7211 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7212 Expr::SE_NoSideEffects, 7213 S.isConstantEvaluated())) { 7214 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7215 /*RHS is int*/ true); 7216 E = ASE->getBase(); 7217 goto tryAgain; 7218 } 7219 } 7220 7221 return SLCT_NotALiteral; 7222 } 7223 7224 default: 7225 return SLCT_NotALiteral; 7226 } 7227 } 7228 7229 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7230 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7231 .Case("scanf", FST_Scanf) 7232 .Cases("printf", "printf0", FST_Printf) 7233 .Cases("NSString", "CFString", FST_NSString) 7234 .Case("strftime", FST_Strftime) 7235 .Case("strfmon", FST_Strfmon) 7236 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7237 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7238 .Case("os_trace", FST_OSLog) 7239 .Case("os_log", FST_OSLog) 7240 .Default(FST_Unknown); 7241 } 7242 7243 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7244 /// functions) for correct use of format strings. 7245 /// Returns true if a format string has been fully checked. 
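/// For example (illustrative): printf("%d\n", x) can be fully checked against
/// its data arguments, whereas printf(fmt, x) with a non-literal 'fmt' cannot
/// and only receives the non-literal diagnostics emitted further below.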
7246 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7247 ArrayRef<const Expr *> Args, 7248 bool IsCXXMember, 7249 VariadicCallType CallType, 7250 SourceLocation Loc, SourceRange Range, 7251 llvm::SmallBitVector &CheckedVarArgs) { 7252 FormatStringInfo FSI; 7253 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7254 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7255 FSI.FirstDataArg, GetFormatStringType(Format), 7256 CallType, Loc, Range, CheckedVarArgs); 7257 return false; 7258 } 7259 7260 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7261 bool HasVAListArg, unsigned format_idx, 7262 unsigned firstDataArg, FormatStringType Type, 7263 VariadicCallType CallType, 7264 SourceLocation Loc, SourceRange Range, 7265 llvm::SmallBitVector &CheckedVarArgs) { 7266 // CHECK: printf/scanf-like function is called with no format string. 7267 if (format_idx >= Args.size()) { 7268 Diag(Loc, diag::warn_missing_format_string) << Range; 7269 return false; 7270 } 7271 7272 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7273 7274 // CHECK: format string is not a string literal. 7275 // 7276 // Dynamically generated format strings are difficult to 7277 // automatically vet at compile time. Requiring that format strings 7278 // are string literals: (1) permits the checking of format strings by 7279 // the compiler and thereby (2) can practically remove the source of 7280 // many format string exploits. 7281 7282 // Format string can be either ObjC string (e.g. @"%d") or 7283 // C string (e.g. "%d") 7284 // ObjC string uses the same format specifiers as C string, so we can use 7285 // the same format string checking logic for both ObjC and C strings. 7286 UncoveredArgHandler UncoveredArg; 7287 StringLiteralCheckType CT = 7288 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7289 format_idx, firstDataArg, Type, CallType, 7290 /*IsFunctionCall*/ true, CheckedVarArgs, 7291 UncoveredArg, 7292 /*no string offset*/ llvm::APSInt(64, false) = 0); 7293 7294 // Generate a diagnostic where an uncovered argument is detected. 7295 if (UncoveredArg.hasUncoveredArg()) { 7296 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7297 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7298 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7299 } 7300 7301 if (CT != SLCT_NotALiteral) 7302 // Literal format string found, check done! 7303 return CT == SLCT_CheckedLiteral; 7304 7305 // Strftime is particular as it always uses a single 'time' argument, 7306 // so it is safe to pass a non-literal string. 7307 if (Type == FST_Strftime) 7308 return false; 7309 7310 // Do not emit diag when the string param is a macro expansion and the 7311 // format is either NSString or CFString. This is a hack to prevent 7312 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7313 // which are usually used in place of NS and CF string literals. 7314 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7315 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7316 return false; 7317 7318 // If there are no arguments specified, warn with -Wformat-security, otherwise 7319 // warn only with -Wformat-nonliteral. 
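// (Illustrative: printf(s) with no data arguments lands in the first,
// -Wformat-security bucket; printf(s, x) lands in the second,
// -Wformat-nonliteral bucket.)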
7320 if (Args.size() == firstDataArg) { 7321 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7322 << OrigFormatExpr->getSourceRange(); 7323 switch (Type) { 7324 default: 7325 break; 7326 case FST_Kprintf: 7327 case FST_FreeBSDKPrintf: 7328 case FST_Printf: 7329 Diag(FormatLoc, diag::note_format_security_fixit) 7330 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7331 break; 7332 case FST_NSString: 7333 Diag(FormatLoc, diag::note_format_security_fixit) 7334 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7335 break; 7336 } 7337 } else { 7338 Diag(FormatLoc, diag::warn_format_nonliteral) 7339 << OrigFormatExpr->getSourceRange(); 7340 } 7341 return false; 7342 } 7343 7344 namespace { 7345 7346 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 7347 protected: 7348 Sema &S; 7349 const FormatStringLiteral *FExpr; 7350 const Expr *OrigFormatExpr; 7351 const Sema::FormatStringType FSType; 7352 const unsigned FirstDataArg; 7353 const unsigned NumDataArgs; 7354 const char *Beg; // Start of format string. 7355 const bool HasVAListArg; 7356 ArrayRef<const Expr *> Args; 7357 unsigned FormatIdx; 7358 llvm::SmallBitVector CoveredArgs; 7359 bool usesPositionalArgs = false; 7360 bool atFirstArg = true; 7361 bool inFunctionCall; 7362 Sema::VariadicCallType CallType; 7363 llvm::SmallBitVector &CheckedVarArgs; 7364 UncoveredArgHandler &UncoveredArg; 7365 7366 public: 7367 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 7368 const Expr *origFormatExpr, 7369 const Sema::FormatStringType type, unsigned firstDataArg, 7370 unsigned numDataArgs, const char *beg, bool hasVAListArg, 7371 ArrayRef<const Expr *> Args, unsigned formatIdx, 7372 bool inFunctionCall, Sema::VariadicCallType callType, 7373 llvm::SmallBitVector &CheckedVarArgs, 7374 UncoveredArgHandler &UncoveredArg) 7375 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 7376 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 7377 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 7378 inFunctionCall(inFunctionCall), CallType(callType), 7379 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 7380 CoveredArgs.resize(numDataArgs); 7381 CoveredArgs.reset(); 7382 } 7383 7384 void DoneProcessing(); 7385 7386 void HandleIncompleteSpecifier(const char *startSpecifier, 7387 unsigned specifierLen) override; 7388 7389 void HandleInvalidLengthModifier( 7390 const analyze_format_string::FormatSpecifier &FS, 7391 const analyze_format_string::ConversionSpecifier &CS, 7392 const char *startSpecifier, unsigned specifierLen, 7393 unsigned DiagID); 7394 7395 void HandleNonStandardLengthModifier( 7396 const analyze_format_string::FormatSpecifier &FS, 7397 const char *startSpecifier, unsigned specifierLen); 7398 7399 void HandleNonStandardConversionSpecifier( 7400 const analyze_format_string::ConversionSpecifier &CS, 7401 const char *startSpecifier, unsigned specifierLen); 7402 7403 void HandlePosition(const char *startPos, unsigned posLen) override; 7404 7405 void HandleInvalidPosition(const char *startSpecifier, 7406 unsigned specifierLen, 7407 analyze_format_string::PositionContext p) override; 7408 7409 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 7410 7411 void HandleNullChar(const char *nullCharacter) override; 7412 7413 template <typename Range> 7414 static void 7415 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 7416 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 7417 bool IsStringLocation, 
Range StringRange, 7418 ArrayRef<FixItHint> Fixit = None); 7419 7420 protected: 7421 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 7422 const char *startSpec, 7423 unsigned specifierLen, 7424 const char *csStart, unsigned csLen); 7425 7426 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 7427 const char *startSpec, 7428 unsigned specifierLen); 7429 7430 SourceRange getFormatStringRange(); 7431 CharSourceRange getSpecifierRange(const char *startSpecifier, 7432 unsigned specifierLen); 7433 SourceLocation getLocationOfByte(const char *x); 7434 7435 const Expr *getDataArg(unsigned i) const; 7436 7437 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 7438 const analyze_format_string::ConversionSpecifier &CS, 7439 const char *startSpecifier, unsigned specifierLen, 7440 unsigned argIndex); 7441 7442 template <typename Range> 7443 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 7444 bool IsStringLocation, Range StringRange, 7445 ArrayRef<FixItHint> Fixit = None); 7446 }; 7447 7448 } // namespace 7449 7450 SourceRange CheckFormatHandler::getFormatStringRange() { 7451 return OrigFormatExpr->getSourceRange(); 7452 } 7453 7454 CharSourceRange CheckFormatHandler:: 7455 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 7456 SourceLocation Start = getLocationOfByte(startSpecifier); 7457 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 7458 7459 // Advance the end SourceLocation by one due to half-open ranges. 7460 End = End.getLocWithOffset(1); 7461 7462 return CharSourceRange::getCharRange(Start, End); 7463 } 7464 7465 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 7466 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 7467 S.getLangOpts(), S.Context.getTargetInfo()); 7468 } 7469 7470 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 7471 unsigned specifierLen){ 7472 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 7473 getLocationOfByte(startSpecifier), 7474 /*IsStringLocation*/true, 7475 getSpecifierRange(startSpecifier, specifierLen)); 7476 } 7477 7478 void CheckFormatHandler::HandleInvalidLengthModifier( 7479 const analyze_format_string::FormatSpecifier &FS, 7480 const analyze_format_string::ConversionSpecifier &CS, 7481 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 7482 using namespace analyze_format_string; 7483 7484 const LengthModifier &LM = FS.getLengthModifier(); 7485 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7486 7487 // See if we know how to fix this length modifier. 
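// (For instance, a length modifier that makes no sense with its conversion
// specifier, such as 'h' paired with 'f'. Example is illustrative only.)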
7488 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7489 if (FixedLM) { 7490 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7491 getLocationOfByte(LM.getStart()), 7492 /*IsStringLocation*/true, 7493 getSpecifierRange(startSpecifier, specifierLen)); 7494 7495 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7496 << FixedLM->toString() 7497 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7498 7499 } else { 7500 FixItHint Hint; 7501 if (DiagID == diag::warn_format_nonsensical_length) 7502 Hint = FixItHint::CreateRemoval(LMRange); 7503 7504 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 7505 getLocationOfByte(LM.getStart()), 7506 /*IsStringLocation*/true, 7507 getSpecifierRange(startSpecifier, specifierLen), 7508 Hint); 7509 } 7510 } 7511 7512 void CheckFormatHandler::HandleNonStandardLengthModifier( 7513 const analyze_format_string::FormatSpecifier &FS, 7514 const char *startSpecifier, unsigned specifierLen) { 7515 using namespace analyze_format_string; 7516 7517 const LengthModifier &LM = FS.getLengthModifier(); 7518 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 7519 7520 // See if we know how to fix this length modifier. 7521 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 7522 if (FixedLM) { 7523 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7524 << LM.toString() << 0, 7525 getLocationOfByte(LM.getStart()), 7526 /*IsStringLocation*/true, 7527 getSpecifierRange(startSpecifier, specifierLen)); 7528 7529 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 7530 << FixedLM->toString() 7531 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 7532 7533 } else { 7534 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7535 << LM.toString() << 0, 7536 getLocationOfByte(LM.getStart()), 7537 /*IsStringLocation*/true, 7538 getSpecifierRange(startSpecifier, specifierLen)); 7539 } 7540 } 7541 7542 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 7543 const analyze_format_string::ConversionSpecifier &CS, 7544 const char *startSpecifier, unsigned specifierLen) { 7545 using namespace analyze_format_string; 7546 7547 // See if we know how to fix this conversion specifier. 
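  // For example, '%D' (a BSD/OS X extension) is not ISO C on targets where it
  // is recognized; if a standard equivalent is known, the note emitted below
  // carries a replacement fixit, otherwise we only warn.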
7548 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 7549 if (FixedCS) { 7550 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7551 << CS.toString() << /*conversion specifier*/1, 7552 getLocationOfByte(CS.getStart()), 7553 /*IsStringLocation*/true, 7554 getSpecifierRange(startSpecifier, specifierLen)); 7555 7556 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 7557 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 7558 << FixedCS->toString() 7559 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 7560 } else { 7561 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 7562 << CS.toString() << /*conversion specifier*/1, 7563 getLocationOfByte(CS.getStart()), 7564 /*IsStringLocation*/true, 7565 getSpecifierRange(startSpecifier, specifierLen)); 7566 } 7567 } 7568 7569 void CheckFormatHandler::HandlePosition(const char *startPos, 7570 unsigned posLen) { 7571 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 7572 getLocationOfByte(startPos), 7573 /*IsStringLocation*/true, 7574 getSpecifierRange(startPos, posLen)); 7575 } 7576 7577 void 7578 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 7579 analyze_format_string::PositionContext p) { 7580 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 7581 << (unsigned) p, 7582 getLocationOfByte(startPos), /*IsStringLocation*/true, 7583 getSpecifierRange(startPos, posLen)); 7584 } 7585 7586 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 7587 unsigned posLen) { 7588 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 7589 getLocationOfByte(startPos), 7590 /*IsStringLocation*/true, 7591 getSpecifierRange(startPos, posLen)); 7592 } 7593 7594 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 7595 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 7596 // The presence of a null character is likely an error. 7597 EmitFormatDiagnostic( 7598 S.PDiag(diag::warn_printf_format_string_contains_null_char), 7599 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 7600 getFormatStringRange()); 7601 } 7602 } 7603 7604 // Note that this may return NULL if there was an error parsing or building 7605 // one of the argument expressions. 7606 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 7607 return Args[FirstDataArg + i]; 7608 } 7609 7610 void CheckFormatHandler::DoneProcessing() { 7611 // Does the number of data arguments exceed the number of 7612 // format conversions in the format string? 7613 if (!HasVAListArg) { 7614 // Find any arguments that weren't covered. 
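    // For example, in printf("%d\n", x, y) the argument 'y' is never consumed
    // by any specifier; it is recorded here so UncoveredArgHandler::Diagnose
    // can later report it as a data argument not used by the format string.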
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
      S, IsFunctionCall, DiagnosticExprs[0], PDiag, Loc,
      /*IsStringLocation*/ false, DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we don't issue
    // a warning, because that would just add to a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue
    // processing the format string after this point, however, as we will
    // likely just get gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
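  // For example, a stray control byte after '%' is rendered as "\x01", while
  // a well-formed multi-byte UTF-8 sequence is rendered as a code point such
  // as "\u00e9", keeping the diagnostic readable.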
7676 std::string CodePointStr; 7677 if (!llvm::sys::locale::isPrint(*csStart)) { 7678 llvm::UTF32 CodePoint; 7679 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 7680 const llvm::UTF8 *E = 7681 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 7682 llvm::ConversionResult Result = 7683 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 7684 7685 if (Result != llvm::conversionOK) { 7686 unsigned char FirstChar = *csStart; 7687 CodePoint = (llvm::UTF32)FirstChar; 7688 } 7689 7690 llvm::raw_string_ostream OS(CodePointStr); 7691 if (CodePoint < 256) 7692 OS << "\\x" << llvm::format("%02x", CodePoint); 7693 else if (CodePoint <= 0xFFFF) 7694 OS << "\\u" << llvm::format("%04x", CodePoint); 7695 else 7696 OS << "\\U" << llvm::format("%08x", CodePoint); 7697 OS.flush(); 7698 Specifier = CodePointStr; 7699 } 7700 7701 EmitFormatDiagnostic( 7702 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 7703 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 7704 7705 return keepGoing; 7706 } 7707 7708 void 7709 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 7710 const char *startSpec, 7711 unsigned specifierLen) { 7712 EmitFormatDiagnostic( 7713 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 7714 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 7715 } 7716 7717 bool 7718 CheckFormatHandler::CheckNumArgs( 7719 const analyze_format_string::FormatSpecifier &FS, 7720 const analyze_format_string::ConversionSpecifier &CS, 7721 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 7722 7723 if (argIndex >= NumDataArgs) { 7724 PartialDiagnostic PDiag = FS.usesPositionalArg() 7725 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 7726 << (argIndex+1) << NumDataArgs) 7727 : S.PDiag(diag::warn_printf_insufficient_data_args); 7728 EmitFormatDiagnostic( 7729 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 7730 getSpecifierRange(startSpecifier, specifierLen)); 7731 7732 // Since more arguments than conversion tokens are given, by extension 7733 // all arguments are covered, so mark this as so. 7734 UncoveredArg.setAllCovered(); 7735 return false; 7736 } 7737 return true; 7738 } 7739 7740 template<typename Range> 7741 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 7742 SourceLocation Loc, 7743 bool IsStringLocation, 7744 Range StringRange, 7745 ArrayRef<FixItHint> FixIt) { 7746 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 7747 Loc, IsStringLocation, StringRange, FixIt); 7748 } 7749 7750 /// If the format string is not within the function call, emit a note 7751 /// so that the function call and string are in diagnostic messages. 7752 /// 7753 /// \param InFunctionCall if true, the format string is within the function 7754 /// call and only one diagnostic message will be produced. Otherwise, an 7755 /// extra note will be emitted pointing to location of the format string. 7756 /// 7757 /// \param ArgumentExpr the expression that is passed as the format string 7758 /// argument in the function call. Used for getting locations when two 7759 /// diagnostics are emitted. 7760 /// 7761 /// \param PDiag the callee should already have provided any strings for the 7762 /// diagnostic message. This function only adds locations and fixits 7763 /// to diagnostics. 7764 /// 7765 /// \param Loc primary location for diagnostic. 
/// If two diagnostics are required, one will be at Loc and a new
/// SourceLocation will be created for the other one.
///
/// \param IsStringLocation if true, Loc points into the format string and
/// should be used for the note. Otherwise, Loc points to the argument list
/// and will be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
        << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
        S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
               diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     bool hasVAListArg, ArrayRef<const Expr *> Args,
                     unsigned formatIdx, bool inFunctionCall,
                     Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
7822 bool allowsObjCArg() const { 7823 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 7824 FSType == Sema::FST_OSTrace; 7825 } 7826 7827 bool HandleInvalidPrintfConversionSpecifier( 7828 const analyze_printf::PrintfSpecifier &FS, 7829 const char *startSpecifier, 7830 unsigned specifierLen) override; 7831 7832 void handleInvalidMaskType(StringRef MaskType) override; 7833 7834 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 7835 const char *startSpecifier, 7836 unsigned specifierLen) override; 7837 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 7838 const char *StartSpecifier, 7839 unsigned SpecifierLen, 7840 const Expr *E); 7841 7842 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 7843 const char *startSpecifier, unsigned specifierLen); 7844 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 7845 const analyze_printf::OptionalAmount &Amt, 7846 unsigned type, 7847 const char *startSpecifier, unsigned specifierLen); 7848 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7849 const analyze_printf::OptionalFlag &flag, 7850 const char *startSpecifier, unsigned specifierLen); 7851 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 7852 const analyze_printf::OptionalFlag &ignoredFlag, 7853 const analyze_printf::OptionalFlag &flag, 7854 const char *startSpecifier, unsigned specifierLen); 7855 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 7856 const Expr *E); 7857 7858 void HandleEmptyObjCModifierFlag(const char *startFlag, 7859 unsigned flagLen) override; 7860 7861 void HandleInvalidObjCModifierFlag(const char *startFlag, 7862 unsigned flagLen) override; 7863 7864 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 7865 const char *flagsEnd, 7866 const char *conversionPosition) 7867 override; 7868 }; 7869 7870 } // namespace 7871 7872 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 7873 const analyze_printf::PrintfSpecifier &FS, 7874 const char *startSpecifier, 7875 unsigned specifierLen) { 7876 const analyze_printf::PrintfConversionSpecifier &CS = 7877 FS.getConversionSpecifier(); 7878 7879 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 7880 getLocationOfByte(CS.getStart()), 7881 startSpecifier, specifierLen, 7882 CS.getStart(), CS.getLength()); 7883 } 7884 7885 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 7886 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 7887 } 7888 7889 bool CheckPrintfHandler::HandleAmount( 7890 const analyze_format_string::OptionalAmount &Amt, 7891 unsigned k, const char *startSpecifier, 7892 unsigned specifierLen) { 7893 if (Amt.hasDataArgument()) { 7894 if (!HasVAListArg) { 7895 unsigned argIndex = Amt.getArgIndex(); 7896 if (argIndex >= NumDataArgs) { 7897 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 7898 << k, 7899 getLocationOfByte(Amt.getStart()), 7900 /*IsStringLocation*/true, 7901 getSpecifierRange(startSpecifier, specifierLen)); 7902 // Don't do any more checking. We will just emit 7903 // spurious errors. 7904 return false; 7905 } 7906 7907 // Type check the data argument. It should be an 'int'. 7908 // Although not in conformance with C99, we also allow the argument to be 7909 // an 'unsigned int' as that is a reasonably safe case. GCC also 7910 // doesn't emit a warning for that case. 
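      // For example, in printf("%*d", width, value) the '*' consumes 'width'
      // as its own data argument, and that argument is what gets type checked
      // here.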
7911 CoveredArgs.set(argIndex); 7912 const Expr *Arg = getDataArg(argIndex); 7913 if (!Arg) 7914 return false; 7915 7916 QualType T = Arg->getType(); 7917 7918 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 7919 assert(AT.isValid()); 7920 7921 if (!AT.matchesType(S.Context, T)) { 7922 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 7923 << k << AT.getRepresentativeTypeName(S.Context) 7924 << T << Arg->getSourceRange(), 7925 getLocationOfByte(Amt.getStart()), 7926 /*IsStringLocation*/true, 7927 getSpecifierRange(startSpecifier, specifierLen)); 7928 // Don't do any more checking. We will just emit 7929 // spurious errors. 7930 return false; 7931 } 7932 } 7933 } 7934 return true; 7935 } 7936 7937 void CheckPrintfHandler::HandleInvalidAmount( 7938 const analyze_printf::PrintfSpecifier &FS, 7939 const analyze_printf::OptionalAmount &Amt, 7940 unsigned type, 7941 const char *startSpecifier, 7942 unsigned specifierLen) { 7943 const analyze_printf::PrintfConversionSpecifier &CS = 7944 FS.getConversionSpecifier(); 7945 7946 FixItHint fixit = 7947 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 7948 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 7949 Amt.getConstantLength())) 7950 : FixItHint(); 7951 7952 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 7953 << type << CS.toString(), 7954 getLocationOfByte(Amt.getStart()), 7955 /*IsStringLocation*/true, 7956 getSpecifierRange(startSpecifier, specifierLen), 7957 fixit); 7958 } 7959 7960 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 7961 const analyze_printf::OptionalFlag &flag, 7962 const char *startSpecifier, 7963 unsigned specifierLen) { 7964 // Warn about pointless flag with a fixit removal. 7965 const analyze_printf::PrintfConversionSpecifier &CS = 7966 FS.getConversionSpecifier(); 7967 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 7968 << flag.toString() << CS.toString(), 7969 getLocationOfByte(flag.getPosition()), 7970 /*IsStringLocation*/true, 7971 getSpecifierRange(startSpecifier, specifierLen), 7972 FixItHint::CreateRemoval( 7973 getSpecifierRange(flag.getPosition(), 1))); 7974 } 7975 7976 void CheckPrintfHandler::HandleIgnoredFlag( 7977 const analyze_printf::PrintfSpecifier &FS, 7978 const analyze_printf::OptionalFlag &ignoredFlag, 7979 const analyze_printf::OptionalFlag &flag, 7980 const char *startSpecifier, 7981 unsigned specifierLen) { 7982 // Warn about ignored flag with a fixit removal. 7983 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 7984 << ignoredFlag.toString() << flag.toString(), 7985 getLocationOfByte(ignoredFlag.getPosition()), 7986 /*IsStringLocation*/true, 7987 getSpecifierRange(startSpecifier, specifierLen), 7988 FixItHint::CreateRemoval( 7989 getSpecifierRange(ignoredFlag.getPosition(), 1))); 7990 } 7991 7992 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 7993 unsigned flagLen) { 7994 // Warn about an empty flag. 7995 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 7996 getLocationOfByte(startFlag), 7997 /*IsStringLocation*/true, 7998 getSpecifierRange(startFlag, flagLen)); 7999 } 8000 8001 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 8002 unsigned flagLen) { 8003 // Warn about an invalid flag. 
8004 auto Range = getSpecifierRange(startFlag, flagLen); 8005 StringRef flag(startFlag, flagLen); 8006 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 8007 getLocationOfByte(startFlag), 8008 /*IsStringLocation*/true, 8009 Range, FixItHint::CreateRemoval(Range)); 8010 } 8011 8012 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 8013 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 8014 // Warn about using '[...]' without a '@' conversion. 8015 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 8016 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 8017 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 8018 getLocationOfByte(conversionPosition), 8019 /*IsStringLocation*/true, 8020 Range, FixItHint::CreateRemoval(Range)); 8021 } 8022 8023 // Determines if the specified is a C++ class or struct containing 8024 // a member with the specified name and kind (e.g. a CXXMethodDecl named 8025 // "c_str()"). 8026 template<typename MemberKind> 8027 static llvm::SmallPtrSet<MemberKind*, 1> 8028 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 8029 const RecordType *RT = Ty->getAs<RecordType>(); 8030 llvm::SmallPtrSet<MemberKind*, 1> Results; 8031 8032 if (!RT) 8033 return Results; 8034 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 8035 if (!RD || !RD->getDefinition()) 8036 return Results; 8037 8038 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 8039 Sema::LookupMemberName); 8040 R.suppressDiagnostics(); 8041 8042 // We just need to include all members of the right kind turned up by the 8043 // filter, at this point. 8044 if (S.LookupQualifiedName(R, RT->getDecl())) 8045 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 8046 NamedDecl *decl = (*I)->getUnderlyingDecl(); 8047 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 8048 Results.insert(FK); 8049 } 8050 return Results; 8051 } 8052 8053 /// Check if we could call '.c_str()' on an object. 8054 /// 8055 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 8056 /// allow the call, or if it would be ambiguous). 8057 bool Sema::hasCStrMethod(const Expr *E) { 8058 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8059 8060 MethodSet Results = 8061 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 8062 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8063 MI != ME; ++MI) 8064 if ((*MI)->getMinRequiredArguments() == 0) 8065 return true; 8066 return false; 8067 } 8068 8069 // Check if a (w)string was passed when a (w)char* was needed, and offer a 8070 // better diagnostic if so. AT is assumed to be valid. 8071 // Returns true when a c_str() conversion method is found. 8072 bool CheckPrintfHandler::checkForCStrMembers( 8073 const analyze_printf::ArgType &AT, const Expr *E) { 8074 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8075 8076 MethodSet Results = 8077 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 8078 8079 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8080 MI != ME; ++MI) { 8081 const CXXMethodDecl *Method = *MI; 8082 if (Method->getMinRequiredArguments() == 0 && 8083 AT.matchesType(S.Context, Method->getReturnType())) { 8084 // FIXME: Suggest parens if the expression needs them. 
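      // For example, passing a std::string to "%s" reaches this point: the
      // class has a zero-argument c_str() returning 'const char *', so the
      // note below suggests appending ".c_str()" to the argument.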
8085 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8086 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8087 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8088 return true; 8089 } 8090 } 8091 8092 return false; 8093 } 8094 8095 bool 8096 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8097 &FS, 8098 const char *startSpecifier, 8099 unsigned specifierLen) { 8100 using namespace analyze_format_string; 8101 using namespace analyze_printf; 8102 8103 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8104 8105 if (FS.consumesDataArgument()) { 8106 if (atFirstArg) { 8107 atFirstArg = false; 8108 usesPositionalArgs = FS.usesPositionalArg(); 8109 } 8110 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8111 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8112 startSpecifier, specifierLen); 8113 return false; 8114 } 8115 } 8116 8117 // First check if the field width, precision, and conversion specifier 8118 // have matching data arguments. 8119 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 8120 startSpecifier, specifierLen)) { 8121 return false; 8122 } 8123 8124 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 8125 startSpecifier, specifierLen)) { 8126 return false; 8127 } 8128 8129 if (!CS.consumesDataArgument()) { 8130 // FIXME: Technically specifying a precision or field width here 8131 // makes no sense. Worth issuing a warning at some point. 8132 return true; 8133 } 8134 8135 // Consume the argument. 8136 unsigned argIndex = FS.getArgIndex(); 8137 if (argIndex < NumDataArgs) { 8138 // The check to see if the argIndex is valid will come later. 8139 // We set the bit here because we may exit early from this 8140 // function if we encounter some other error. 8141 CoveredArgs.set(argIndex); 8142 } 8143 8144 // FreeBSD kernel extensions. 8145 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8146 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8147 // We need at least two arguments. 8148 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8149 return false; 8150 8151 // Claim the second argument. 8152 CoveredArgs.set(argIndex + 1); 8153 8154 // Type check the first argument (int for %b, pointer for %D) 8155 const Expr *Ex = getDataArg(argIndex); 8156 const analyze_printf::ArgType &AT = 8157 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8158 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8159 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8160 EmitFormatDiagnostic( 8161 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8162 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8163 << false << Ex->getSourceRange(), 8164 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8165 getSpecifierRange(startSpecifier, specifierLen)); 8166 8167 // Type check the second argument (char * for both %b and %D) 8168 Ex = getDataArg(argIndex + 1); 8169 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8170 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8171 EmitFormatDiagnostic( 8172 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8173 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8174 << false << Ex->getSourceRange(), 8175 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8176 getSpecifierRange(startSpecifier, specifierLen)); 8177 8178 return true; 8179 } 8180 8181 // Check for using an Objective-C specific conversion specifier 8182 // in a non-ObjC literal. 
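  // For example, "%@" in a plain printf() format string is rejected here;
  // object specifiers are only meaningful for NSString, os_log and os_trace
  // formats (see allowsObjCArg()).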
8183 if (!allowsObjCArg() && CS.isObjCArg()) { 8184 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8185 specifierLen); 8186 } 8187 8188 // %P can only be used with os_log. 8189 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8190 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8191 specifierLen); 8192 } 8193 8194 // %n is not allowed with os_log. 8195 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8196 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8197 getLocationOfByte(CS.getStart()), 8198 /*IsStringLocation*/ false, 8199 getSpecifierRange(startSpecifier, specifierLen)); 8200 8201 return true; 8202 } 8203 8204 // Only scalars are allowed for os_trace. 8205 if (FSType == Sema::FST_OSTrace && 8206 (CS.getKind() == ConversionSpecifier::PArg || 8207 CS.getKind() == ConversionSpecifier::sArg || 8208 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8209 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8210 specifierLen); 8211 } 8212 8213 // Check for use of public/private annotation outside of os_log(). 8214 if (FSType != Sema::FST_OSLog) { 8215 if (FS.isPublic().isSet()) { 8216 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8217 << "public", 8218 getLocationOfByte(FS.isPublic().getPosition()), 8219 /*IsStringLocation*/ false, 8220 getSpecifierRange(startSpecifier, specifierLen)); 8221 } 8222 if (FS.isPrivate().isSet()) { 8223 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8224 << "private", 8225 getLocationOfByte(FS.isPrivate().getPosition()), 8226 /*IsStringLocation*/ false, 8227 getSpecifierRange(startSpecifier, specifierLen)); 8228 } 8229 } 8230 8231 // Check for invalid use of field width 8232 if (!FS.hasValidFieldWidth()) { 8233 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8234 startSpecifier, specifierLen); 8235 } 8236 8237 // Check for invalid use of precision 8238 if (!FS.hasValidPrecision()) { 8239 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8240 startSpecifier, specifierLen); 8241 } 8242 8243 // Precision is mandatory for %P specifier. 8244 if (CS.getKind() == ConversionSpecifier::PArg && 8245 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8246 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8247 getLocationOfByte(startSpecifier), 8248 /*IsStringLocation*/ false, 8249 getSpecifierRange(startSpecifier, specifierLen)); 8250 } 8251 8252 // Check each flag does not conflict with any other component. 
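  // For example, "%+s" warns that the '+' flag makes no sense with 's' (with
  // a removal fixit), and in "% +d" the ' ' flag is reported as ignored
  // because '+' is also present.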
8253 if (!FS.hasValidThousandsGroupingPrefix()) 8254 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8255 if (!FS.hasValidLeadingZeros()) 8256 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8257 if (!FS.hasValidPlusPrefix()) 8258 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8259 if (!FS.hasValidSpacePrefix()) 8260 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8261 if (!FS.hasValidAlternativeForm()) 8262 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8263 if (!FS.hasValidLeftJustified()) 8264 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8265 8266 // Check that flags are not ignored by another flag 8267 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8268 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8269 startSpecifier, specifierLen); 8270 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8271 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8272 startSpecifier, specifierLen); 8273 8274 // Check the length modifier is valid with the given conversion specifier. 8275 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8276 S.getLangOpts())) 8277 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8278 diag::warn_format_nonsensical_length); 8279 else if (!FS.hasStandardLengthModifier()) 8280 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8281 else if (!FS.hasStandardLengthConversionCombination()) 8282 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8283 diag::warn_format_non_standard_conversion_spec); 8284 8285 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8286 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8287 8288 // The remaining checks depend on the data arguments. 8289 if (HasVAListArg) 8290 return true; 8291 8292 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8293 return false; 8294 8295 const Expr *Arg = getDataArg(argIndex); 8296 if (!Arg) 8297 return true; 8298 8299 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8300 } 8301 8302 static bool requiresParensToAddCast(const Expr *E) { 8303 // FIXME: We should have a general way to reason about operator 8304 // precedence and whether parens are actually needed here. 8305 // Take care of a few common cases where they aren't. 
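  // For example, a bare DeclRefExpr 'x' can be rewritten as '(NSInteger)x'
  // directly, whereas a binary expression like 'a + b' falls into the default
  // case and needs '(NSInteger)(a + b)'.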
8306 const Expr *Inside = E->IgnoreImpCasts(); 8307 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8308 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8309 8310 switch (Inside->getStmtClass()) { 8311 case Stmt::ArraySubscriptExprClass: 8312 case Stmt::CallExprClass: 8313 case Stmt::CharacterLiteralClass: 8314 case Stmt::CXXBoolLiteralExprClass: 8315 case Stmt::DeclRefExprClass: 8316 case Stmt::FloatingLiteralClass: 8317 case Stmt::IntegerLiteralClass: 8318 case Stmt::MemberExprClass: 8319 case Stmt::ObjCArrayLiteralClass: 8320 case Stmt::ObjCBoolLiteralExprClass: 8321 case Stmt::ObjCBoxedExprClass: 8322 case Stmt::ObjCDictionaryLiteralClass: 8323 case Stmt::ObjCEncodeExprClass: 8324 case Stmt::ObjCIvarRefExprClass: 8325 case Stmt::ObjCMessageExprClass: 8326 case Stmt::ObjCPropertyRefExprClass: 8327 case Stmt::ObjCStringLiteralClass: 8328 case Stmt::ObjCSubscriptRefExprClass: 8329 case Stmt::ParenExprClass: 8330 case Stmt::StringLiteralClass: 8331 case Stmt::UnaryOperatorClass: 8332 return false; 8333 default: 8334 return true; 8335 } 8336 } 8337 8338 static std::pair<QualType, StringRef> 8339 shouldNotPrintDirectly(const ASTContext &Context, 8340 QualType IntendedTy, 8341 const Expr *E) { 8342 // Use a 'while' to peel off layers of typedefs. 8343 QualType TyTy = IntendedTy; 8344 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 8345 StringRef Name = UserTy->getDecl()->getName(); 8346 QualType CastTy = llvm::StringSwitch<QualType>(Name) 8347 .Case("CFIndex", Context.getNSIntegerType()) 8348 .Case("NSInteger", Context.getNSIntegerType()) 8349 .Case("NSUInteger", Context.getNSUIntegerType()) 8350 .Case("SInt32", Context.IntTy) 8351 .Case("UInt32", Context.UnsignedIntTy) 8352 .Default(QualType()); 8353 8354 if (!CastTy.isNull()) 8355 return std::make_pair(CastTy, Name); 8356 8357 TyTy = UserTy->desugar(); 8358 } 8359 8360 // Strip parens if necessary. 8361 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 8362 return shouldNotPrintDirectly(Context, 8363 PE->getSubExpr()->getType(), 8364 PE->getSubExpr()); 8365 8366 // If this is a conditional expression, then its result type is constructed 8367 // via usual arithmetic conversions and thus there might be no necessary 8368 // typedef sugar there. Recurse to operands to check for NSInteger & 8369 // Co. usage condition. 8370 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 8371 QualType TrueTy, FalseTy; 8372 StringRef TrueName, FalseName; 8373 8374 std::tie(TrueTy, TrueName) = 8375 shouldNotPrintDirectly(Context, 8376 CO->getTrueExpr()->getType(), 8377 CO->getTrueExpr()); 8378 std::tie(FalseTy, FalseName) = 8379 shouldNotPrintDirectly(Context, 8380 CO->getFalseExpr()->getType(), 8381 CO->getFalseExpr()); 8382 8383 if (TrueTy == FalseTy) 8384 return std::make_pair(TrueTy, TrueName); 8385 else if (TrueTy.isNull()) 8386 return std::make_pair(FalseTy, FalseName); 8387 else if (FalseTy.isNull()) 8388 return std::make_pair(TrueTy, TrueName); 8389 } 8390 8391 return std::make_pair(QualType(), StringRef()); 8392 } 8393 8394 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 8395 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 8396 /// type do not count. 8397 static bool 8398 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 8399 QualType From = ICE->getSubExpr()->getType(); 8400 QualType To = ICE->getType(); 8401 // It's an integer promotion if the destination type is the promoted 8402 // source type. 
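  // For example, a 'short' passed to a variadic function arrives as an
  // integral cast to 'int', and a 'float' arrives as a floating cast to
  // 'double'; both count as argument promotions here.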
8403 if (ICE->getCastKind() == CK_IntegralCast && 8404 From->isPromotableIntegerType() && 8405 S.Context.getPromotedIntegerType(From) == To) 8406 return true; 8407 // Look through vector types, since we do default argument promotion for 8408 // those in OpenCL. 8409 if (const auto *VecTy = From->getAs<ExtVectorType>()) 8410 From = VecTy->getElementType(); 8411 if (const auto *VecTy = To->getAs<ExtVectorType>()) 8412 To = VecTy->getElementType(); 8413 // It's a floating promotion if the source type is a lower rank. 8414 return ICE->getCastKind() == CK_FloatingCast && 8415 S.Context.getFloatingTypeOrder(From, To) < 0; 8416 } 8417 8418 bool 8419 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8420 const char *StartSpecifier, 8421 unsigned SpecifierLen, 8422 const Expr *E) { 8423 using namespace analyze_format_string; 8424 using namespace analyze_printf; 8425 8426 // Now type check the data expression that matches the 8427 // format specifier. 8428 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 8429 if (!AT.isValid()) 8430 return true; 8431 8432 QualType ExprTy = E->getType(); 8433 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 8434 ExprTy = TET->getUnderlyingExpr()->getType(); 8435 } 8436 8437 // Diagnose attempts to print a boolean value as a character. Unlike other 8438 // -Wformat diagnostics, this is fine from a type perspective, but it still 8439 // doesn't make sense. 8440 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 8441 E->isKnownToHaveBooleanValue()) { 8442 const CharSourceRange &CSR = 8443 getSpecifierRange(StartSpecifier, SpecifierLen); 8444 SmallString<4> FSString; 8445 llvm::raw_svector_ostream os(FSString); 8446 FS.toString(os); 8447 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 8448 << FSString, 8449 E->getExprLoc(), false, CSR); 8450 return true; 8451 } 8452 8453 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 8454 if (Match == analyze_printf::ArgType::Match) 8455 return true; 8456 8457 // Look through argument promotions for our error message's reported type. 8458 // This includes the integral and floating promotions, but excludes array 8459 // and function pointer decay (seeing that an argument intended to be a 8460 // string has type 'char [6]' is probably more confusing than 'char *') and 8461 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 8462 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 8463 if (isArithmeticArgumentPromotion(S, ICE)) { 8464 E = ICE->getSubExpr(); 8465 ExprTy = E->getType(); 8466 8467 // Check if we didn't match because of an implicit cast from a 'char' 8468 // or 'short' to an 'int'. This is done because printf is a varargs 8469 // function. 8470 if (ICE->getType() == S.Context.IntTy || 8471 ICE->getType() == S.Context.UnsignedIntTy) { 8472 // All further checking is done on the subexpression 8473 const analyze_printf::ArgType::MatchKind ImplicitMatch = 8474 AT.matchesType(S.Context, ExprTy); 8475 if (ImplicitMatch == analyze_printf::ArgType::Match) 8476 return true; 8477 if (ImplicitMatch == ArgType::NoMatchPedantic || 8478 ImplicitMatch == ArgType::NoMatchTypeConfusion) 8479 Match = ImplicitMatch; 8480 } 8481 } 8482 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 8483 // Special case for 'a', which has type 'int' in C. 
8484 // Note, however, that we do /not/ want to treat multibyte constants like 8485 // 'MooV' as characters! This form is deprecated but still exists. 8486 if (ExprTy == S.Context.IntTy) 8487 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 8488 ExprTy = S.Context.CharTy; 8489 } 8490 8491 // Look through enums to their underlying type. 8492 bool IsEnum = false; 8493 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 8494 ExprTy = EnumTy->getDecl()->getIntegerType(); 8495 IsEnum = true; 8496 } 8497 8498 // %C in an Objective-C context prints a unichar, not a wchar_t. 8499 // If the argument is an integer of some kind, believe the %C and suggest 8500 // a cast instead of changing the conversion specifier. 8501 QualType IntendedTy = ExprTy; 8502 if (isObjCContext() && 8503 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 8504 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 8505 !ExprTy->isCharType()) { 8506 // 'unichar' is defined as a typedef of unsigned short, but we should 8507 // prefer using the typedef if it is visible. 8508 IntendedTy = S.Context.UnsignedShortTy; 8509 8510 // While we are here, check if the value is an IntegerLiteral that happens 8511 // to be within the valid range. 8512 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 8513 const llvm::APInt &V = IL->getValue(); 8514 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 8515 return true; 8516 } 8517 8518 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 8519 Sema::LookupOrdinaryName); 8520 if (S.LookupName(Result, S.getCurScope())) { 8521 NamedDecl *ND = Result.getFoundDecl(); 8522 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 8523 if (TD->getUnderlyingType() == IntendedTy) 8524 IntendedTy = S.Context.getTypedefType(TD); 8525 } 8526 } 8527 } 8528 8529 // Special-case some of Darwin's platform-independence types by suggesting 8530 // casts to primitive types that are known to be large enough. 8531 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 8532 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 8533 QualType CastTy; 8534 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 8535 if (!CastTy.isNull()) { 8536 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 8537 // (long in ASTContext). Only complain to pedants. 8538 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 8539 (AT.isSizeT() || AT.isPtrdiffT()) && 8540 AT.matchesType(S.Context, CastTy)) 8541 Match = ArgType::NoMatchPedantic; 8542 IntendedTy = CastTy; 8543 ShouldNotPrintDirectly = true; 8544 } 8545 } 8546 8547 // We may be able to offer a FixItHint if it is a supported type. 
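  // For example, for printf("%d", n) where 'n' has type 'long', fixType()
  // below produces the corrected specifier "%ld", which feeds the fixit hints
  // that follow.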
8548 PrintfSpecifier fixedFS = FS; 8549 bool Success = 8550 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 8551 8552 if (Success) { 8553 // Get the fix string from the fixed format specifier 8554 SmallString<16> buf; 8555 llvm::raw_svector_ostream os(buf); 8556 fixedFS.toString(os); 8557 8558 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 8559 8560 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 8561 unsigned Diag; 8562 switch (Match) { 8563 case ArgType::Match: llvm_unreachable("expected non-matching"); 8564 case ArgType::NoMatchPedantic: 8565 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8566 break; 8567 case ArgType::NoMatchTypeConfusion: 8568 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8569 break; 8570 case ArgType::NoMatch: 8571 Diag = diag::warn_format_conversion_argument_type_mismatch; 8572 break; 8573 } 8574 8575 // In this case, the specifier is wrong and should be changed to match 8576 // the argument. 8577 EmitFormatDiagnostic(S.PDiag(Diag) 8578 << AT.getRepresentativeTypeName(S.Context) 8579 << IntendedTy << IsEnum << E->getSourceRange(), 8580 E->getBeginLoc(), 8581 /*IsStringLocation*/ false, SpecRange, 8582 FixItHint::CreateReplacement(SpecRange, os.str())); 8583 } else { 8584 // The canonical type for formatting this value is different from the 8585 // actual type of the expression. (This occurs, for example, with Darwin's 8586 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 8587 // should be printed as 'long' for 64-bit compatibility.) 8588 // Rather than emitting a normal format/argument mismatch, we want to 8589 // add a cast to the recommended type (and correct the format string 8590 // if necessary). 8591 SmallString<16> CastBuf; 8592 llvm::raw_svector_ostream CastFix(CastBuf); 8593 CastFix << "("; 8594 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 8595 CastFix << ")"; 8596 8597 SmallVector<FixItHint,4> Hints; 8598 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 8599 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 8600 8601 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 8602 // If there's already a cast present, just replace it. 8603 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 8604 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 8605 8606 } else if (!requiresParensToAddCast(E)) { 8607 // If the expression has high enough precedence, 8608 // just write the C-style cast. 8609 Hints.push_back( 8610 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8611 } else { 8612 // Otherwise, add parens around the expression as well as the cast. 8613 CastFix << "("; 8614 Hints.push_back( 8615 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 8616 8617 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 8618 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 8619 } 8620 8621 if (ShouldNotPrintDirectly) { 8622 // The expression has a type that should not be printed directly. 8623 // We extract the name from the typedef because we don't want to show 8624 // the underlying type in the diagnostic. 8625 StringRef Name; 8626 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 8627 Name = TypedefTy->getDecl()->getName(); 8628 else 8629 Name = CastTyName; 8630 unsigned Diag = Match == ArgType::NoMatchPedantic 8631 ? 
diag::warn_format_argument_needs_cast_pedantic 8632 : diag::warn_format_argument_needs_cast; 8633 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 8634 << E->getSourceRange(), 8635 E->getBeginLoc(), /*IsStringLocation=*/false, 8636 SpecRange, Hints); 8637 } else { 8638 // In this case, the expression could be printed using a different 8639 // specifier, but we've decided that the specifier is probably correct 8640 // and we should cast instead. Just use the normal warning message. 8641 EmitFormatDiagnostic( 8642 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8643 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 8644 << E->getSourceRange(), 8645 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 8646 } 8647 } 8648 } else { 8649 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 8650 SpecifierLen); 8651 // Since the warning for passing non-POD types to variadic functions 8652 // was deferred until now, we emit a warning for non-POD 8653 // arguments here. 8654 switch (S.isValidVarArgType(ExprTy)) { 8655 case Sema::VAK_Valid: 8656 case Sema::VAK_ValidInCXX11: { 8657 unsigned Diag; 8658 switch (Match) { 8659 case ArgType::Match: llvm_unreachable("expected non-matching"); 8660 case ArgType::NoMatchPedantic: 8661 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 8662 break; 8663 case ArgType::NoMatchTypeConfusion: 8664 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 8665 break; 8666 case ArgType::NoMatch: 8667 Diag = diag::warn_format_conversion_argument_type_mismatch; 8668 break; 8669 } 8670 8671 EmitFormatDiagnostic( 8672 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 8673 << IsEnum << CSR << E->getSourceRange(), 8674 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8675 break; 8676 } 8677 case Sema::VAK_Undefined: 8678 case Sema::VAK_MSVCUndefined: 8679 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 8680 << S.getLangOpts().CPlusPlus11 << ExprTy 8681 << CallType 8682 << AT.getRepresentativeTypeName(S.Context) << CSR 8683 << E->getSourceRange(), 8684 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8685 checkForCStrMembers(AT, E); 8686 break; 8687 8688 case Sema::VAK_Invalid: 8689 if (ExprTy->isObjCObjectType()) 8690 EmitFormatDiagnostic( 8691 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 8692 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 8693 << AT.getRepresentativeTypeName(S.Context) << CSR 8694 << E->getSourceRange(), 8695 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 8696 else 8697 // FIXME: If this is an initializer list, suggest removing the braces 8698 // or inserting a cast to the target type. 
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, bool hasVAListArg,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
         const analyze_scanf::ScanfSpecifier &FS,
         const char *startSpecifier,
         unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}

bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check that the field width is non-zero.
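  // For example, scanf("%0d", &n) specifies a zero field width, which is not
  // allowed; the diagnostic below suggests removing the width.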
8790 const OptionalAmount &Amt = FS.getFieldWidth(); 8791 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 8792 if (Amt.getConstantAmount() == 0) { 8793 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 8794 Amt.getConstantLength()); 8795 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 8796 getLocationOfByte(Amt.getStart()), 8797 /*IsStringLocation*/true, R, 8798 FixItHint::CreateRemoval(R)); 8799 } 8800 } 8801 8802 if (!FS.consumesDataArgument()) { 8803 // FIXME: Technically specifying a precision or field width here 8804 // makes no sense. Worth issuing a warning at some point. 8805 return true; 8806 } 8807 8808 // Consume the argument. 8809 unsigned argIndex = FS.getArgIndex(); 8810 if (argIndex < NumDataArgs) { 8811 // The check to see if the argIndex is valid will come later. 8812 // We set the bit here because we may exit early from this 8813 // function if we encounter some other error. 8814 CoveredArgs.set(argIndex); 8815 } 8816 8817 // Check the length modifier is valid with the given conversion specifier. 8818 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8819 S.getLangOpts())) 8820 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8821 diag::warn_format_nonsensical_length); 8822 else if (!FS.hasStandardLengthModifier()) 8823 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8824 else if (!FS.hasStandardLengthConversionCombination()) 8825 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8826 diag::warn_format_non_standard_conversion_spec); 8827 8828 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8829 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8830 8831 // The remaining checks depend on the data arguments. 8832 if (HasVAListArg) 8833 return true; 8834 8835 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8836 return false; 8837 8838 // Check that the argument type matches the format specifier. 8839 const Expr *Ex = getDataArg(argIndex); 8840 if (!Ex) 8841 return true; 8842 8843 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 8844 8845 if (!AT.isValid()) { 8846 return true; 8847 } 8848 8849 analyze_format_string::ArgType::MatchKind Match = 8850 AT.matchesType(S.Context, Ex->getType()); 8851 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 8852 if (Match == analyze_format_string::ArgType::Match) 8853 return true; 8854 8855 ScanfSpecifier fixedFS = FS; 8856 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 8857 S.getLangOpts(), S.Context); 8858 8859 unsigned Diag = 8860 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 8861 : diag::warn_format_conversion_argument_type_mismatch; 8862 8863 if (Success) { 8864 // Get the fix string from the fixed format specifier. 
8865 SmallString<128> buf; 8866 llvm::raw_svector_ostream os(buf); 8867 fixedFS.toString(os); 8868 8869 EmitFormatDiagnostic( 8870 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 8871 << Ex->getType() << false << Ex->getSourceRange(), 8872 Ex->getBeginLoc(), 8873 /*IsStringLocation*/ false, 8874 getSpecifierRange(startSpecifier, specifierLen), 8875 FixItHint::CreateReplacement( 8876 getSpecifierRange(startSpecifier, specifierLen), os.str())); 8877 } else { 8878 EmitFormatDiagnostic(S.PDiag(Diag) 8879 << AT.getRepresentativeTypeName(S.Context) 8880 << Ex->getType() << false << Ex->getSourceRange(), 8881 Ex->getBeginLoc(), 8882 /*IsStringLocation*/ false, 8883 getSpecifierRange(startSpecifier, specifierLen)); 8884 } 8885 8886 return true; 8887 } 8888 8889 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8890 const Expr *OrigFormatExpr, 8891 ArrayRef<const Expr *> Args, 8892 bool HasVAListArg, unsigned format_idx, 8893 unsigned firstDataArg, 8894 Sema::FormatStringType Type, 8895 bool inFunctionCall, 8896 Sema::VariadicCallType CallType, 8897 llvm::SmallBitVector &CheckedVarArgs, 8898 UncoveredArgHandler &UncoveredArg, 8899 bool IgnoreStringsWithoutSpecifiers) { 8900 // CHECK: is the format string a wide literal? 8901 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 8902 CheckFormatHandler::EmitFormatDiagnostic( 8903 S, inFunctionCall, Args[format_idx], 8904 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 8905 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 8906 return; 8907 } 8908 8909 // Str - The format string. NOTE: this is NOT null-terminated! 8910 StringRef StrRef = FExpr->getString(); 8911 const char *Str = StrRef.data(); 8912 // Account for cases where the string literal is truncated in a declaration. 8913 const ConstantArrayType *T = 8914 S.Context.getAsConstantArrayType(FExpr->getType()); 8915 assert(T && "String literal not of constant array type!"); 8916 size_t TypeSize = T->getSize().getZExtValue(); 8917 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 8918 const unsigned numDataArgs = Args.size() - firstDataArg; 8919 8920 if (IgnoreStringsWithoutSpecifiers && 8921 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 8922 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 8923 return; 8924 8925 // Emit a warning if the string literal is truncated and does not contain an 8926 // embedded null character. 8927 if (TypeSize <= StrRef.size() && 8928 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 8929 CheckFormatHandler::EmitFormatDiagnostic( 8930 S, inFunctionCall, Args[format_idx], 8931 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 8932 FExpr->getBeginLoc(), 8933 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 8934 return; 8935 } 8936 8937 // CHECK: empty format string? 
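  // For example, printf("", x) warns here: the format string is empty even
  // though data arguments were supplied.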
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                                  Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
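// For example, abs -> labs -> llabs, fabsf -> fabs -> fabsl, and
// cabsf -> cabs -> cabsl (and likewise for the __builtin_ forms).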
8990 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 8991 switch (AbsFunction) { 8992 default: 8993 return 0; 8994 8995 case Builtin::BI__builtin_abs: 8996 return Builtin::BI__builtin_labs; 8997 case Builtin::BI__builtin_labs: 8998 return Builtin::BI__builtin_llabs; 8999 case Builtin::BI__builtin_llabs: 9000 return 0; 9001 9002 case Builtin::BI__builtin_fabsf: 9003 return Builtin::BI__builtin_fabs; 9004 case Builtin::BI__builtin_fabs: 9005 return Builtin::BI__builtin_fabsl; 9006 case Builtin::BI__builtin_fabsl: 9007 return 0; 9008 9009 case Builtin::BI__builtin_cabsf: 9010 return Builtin::BI__builtin_cabs; 9011 case Builtin::BI__builtin_cabs: 9012 return Builtin::BI__builtin_cabsl; 9013 case Builtin::BI__builtin_cabsl: 9014 return 0; 9015 9016 case Builtin::BIabs: 9017 return Builtin::BIlabs; 9018 case Builtin::BIlabs: 9019 return Builtin::BIllabs; 9020 case Builtin::BIllabs: 9021 return 0; 9022 9023 case Builtin::BIfabsf: 9024 return Builtin::BIfabs; 9025 case Builtin::BIfabs: 9026 return Builtin::BIfabsl; 9027 case Builtin::BIfabsl: 9028 return 0; 9029 9030 case Builtin::BIcabsf: 9031 return Builtin::BIcabs; 9032 case Builtin::BIcabs: 9033 return Builtin::BIcabsl; 9034 case Builtin::BIcabsl: 9035 return 0; 9036 } 9037 } 9038 9039 // Returns the argument type of the absolute value function. 9040 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9041 unsigned AbsType) { 9042 if (AbsType == 0) 9043 return QualType(); 9044 9045 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9046 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9047 if (Error != ASTContext::GE_None) 9048 return QualType(); 9049 9050 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9051 if (!FT) 9052 return QualType(); 9053 9054 if (FT->getNumParams() != 1) 9055 return QualType(); 9056 9057 return FT->getParamType(0); 9058 } 9059 9060 // Returns the best absolute value function, or zero, based on type and 9061 // current absolute value function. 9062 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9063 unsigned AbsFunctionKind) { 9064 unsigned BestKind = 0; 9065 uint64_t ArgSize = Context.getTypeSize(ArgType); 9066 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9067 Kind = getLargerAbsoluteValueFunction(Kind)) { 9068 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9069 if (Context.getTypeSize(ParamType) >= ArgSize) { 9070 if (BestKind == 0) 9071 BestKind = Kind; 9072 else if (Context.hasSameType(ParamType, ArgType)) { 9073 BestKind = Kind; 9074 break; 9075 } 9076 } 9077 } 9078 return BestKind; 9079 } 9080 9081 enum AbsoluteValueKind { 9082 AVK_Integer, 9083 AVK_Floating, 9084 AVK_Complex 9085 }; 9086 9087 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9088 if (T->isIntegralOrEnumerationType()) 9089 return AVK_Integer; 9090 if (T->isRealFloatingType()) 9091 return AVK_Floating; 9092 if (T->isAnyComplexType()) 9093 return AVK_Complex; 9094 9095 llvm_unreachable("Type not integer, floating, or complex"); 9096 } 9097 9098 // Changes the absolute value function to a different type. Preserves whether 9099 // the function is a builtin. 
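// As a sketch of the mapping below, a floating-point absolute value applied
// to an integer argument is redirected to the integer family, staying on the
// same library/builtin side:
//
//   changeAbsFunction(Builtin::BIfabsf, AVK_Integer)           == Builtin::BIabs
//   changeAbsFunction(Builtin::BI__builtin_cabs, AVK_Floating) == Builtin::BI__builtin_fabsf
//
// The result is the smallest member of the target family; getBestAbsFunction
// is then used to widen it to fit the argument type.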
9100 static unsigned changeAbsFunction(unsigned AbsKind, 9101 AbsoluteValueKind ValueKind) { 9102 switch (ValueKind) { 9103 case AVK_Integer: 9104 switch (AbsKind) { 9105 default: 9106 return 0; 9107 case Builtin::BI__builtin_fabsf: 9108 case Builtin::BI__builtin_fabs: 9109 case Builtin::BI__builtin_fabsl: 9110 case Builtin::BI__builtin_cabsf: 9111 case Builtin::BI__builtin_cabs: 9112 case Builtin::BI__builtin_cabsl: 9113 return Builtin::BI__builtin_abs; 9114 case Builtin::BIfabsf: 9115 case Builtin::BIfabs: 9116 case Builtin::BIfabsl: 9117 case Builtin::BIcabsf: 9118 case Builtin::BIcabs: 9119 case Builtin::BIcabsl: 9120 return Builtin::BIabs; 9121 } 9122 case AVK_Floating: 9123 switch (AbsKind) { 9124 default: 9125 return 0; 9126 case Builtin::BI__builtin_abs: 9127 case Builtin::BI__builtin_labs: 9128 case Builtin::BI__builtin_llabs: 9129 case Builtin::BI__builtin_cabsf: 9130 case Builtin::BI__builtin_cabs: 9131 case Builtin::BI__builtin_cabsl: 9132 return Builtin::BI__builtin_fabsf; 9133 case Builtin::BIabs: 9134 case Builtin::BIlabs: 9135 case Builtin::BIllabs: 9136 case Builtin::BIcabsf: 9137 case Builtin::BIcabs: 9138 case Builtin::BIcabsl: 9139 return Builtin::BIfabsf; 9140 } 9141 case AVK_Complex: 9142 switch (AbsKind) { 9143 default: 9144 return 0; 9145 case Builtin::BI__builtin_abs: 9146 case Builtin::BI__builtin_labs: 9147 case Builtin::BI__builtin_llabs: 9148 case Builtin::BI__builtin_fabsf: 9149 case Builtin::BI__builtin_fabs: 9150 case Builtin::BI__builtin_fabsl: 9151 return Builtin::BI__builtin_cabsf; 9152 case Builtin::BIabs: 9153 case Builtin::BIlabs: 9154 case Builtin::BIllabs: 9155 case Builtin::BIfabsf: 9156 case Builtin::BIfabs: 9157 case Builtin::BIfabsl: 9158 return Builtin::BIcabsf; 9159 } 9160 } 9161 llvm_unreachable("Unable to convert function"); 9162 } 9163 9164 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9165 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9166 if (!FnInfo) 9167 return 0; 9168 9169 switch (FDecl->getBuiltinID()) { 9170 default: 9171 return 0; 9172 case Builtin::BI__builtin_abs: 9173 case Builtin::BI__builtin_fabs: 9174 case Builtin::BI__builtin_fabsf: 9175 case Builtin::BI__builtin_fabsl: 9176 case Builtin::BI__builtin_labs: 9177 case Builtin::BI__builtin_llabs: 9178 case Builtin::BI__builtin_cabs: 9179 case Builtin::BI__builtin_cabsf: 9180 case Builtin::BI__builtin_cabsl: 9181 case Builtin::BIabs: 9182 case Builtin::BIlabs: 9183 case Builtin::BIllabs: 9184 case Builtin::BIfabs: 9185 case Builtin::BIfabsf: 9186 case Builtin::BIfabsl: 9187 case Builtin::BIcabs: 9188 case Builtin::BIcabsf: 9189 case Builtin::BIcabsl: 9190 return FDecl->getBuiltinID(); 9191 } 9192 llvm_unreachable("Unknown Builtin type"); 9193 } 9194 9195 // If the replacement is valid, emit a note with replacement function. 9196 // Additionally, suggest including the proper header if not already included. 
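// For illustration (hypothetical user code): in C++, 'abs(someDouble)' gets a
// note with a fix-it rewriting the callee to 'std::abs', plus a hint to
// include <cmath> unless a suitable std::abs overload is already visible; in
// C, the note names the matching library/builtin function instead.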
9197 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9198 unsigned AbsKind, QualType ArgType) { 9199 bool EmitHeaderHint = true; 9200 const char *HeaderName = nullptr; 9201 const char *FunctionName = nullptr; 9202 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9203 FunctionName = "std::abs"; 9204 if (ArgType->isIntegralOrEnumerationType()) { 9205 HeaderName = "cstdlib"; 9206 } else if (ArgType->isRealFloatingType()) { 9207 HeaderName = "cmath"; 9208 } else { 9209 llvm_unreachable("Invalid Type"); 9210 } 9211 9212 // Lookup all std::abs 9213 if (NamespaceDecl *Std = S.getStdNamespace()) { 9214 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9215 R.suppressDiagnostics(); 9216 S.LookupQualifiedName(R, Std); 9217 9218 for (const auto *I : R) { 9219 const FunctionDecl *FDecl = nullptr; 9220 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9221 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9222 } else { 9223 FDecl = dyn_cast<FunctionDecl>(I); 9224 } 9225 if (!FDecl) 9226 continue; 9227 9228 // Found std::abs(), check that they are the right ones. 9229 if (FDecl->getNumParams() != 1) 9230 continue; 9231 9232 // Check that the parameter type can handle the argument. 9233 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9234 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9235 S.Context.getTypeSize(ArgType) <= 9236 S.Context.getTypeSize(ParamType)) { 9237 // Found a function, don't need the header hint. 9238 EmitHeaderHint = false; 9239 break; 9240 } 9241 } 9242 } 9243 } else { 9244 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9245 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9246 9247 if (HeaderName) { 9248 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9249 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9250 R.suppressDiagnostics(); 9251 S.LookupName(R, S.getCurScope()); 9252 9253 if (R.isSingleResult()) { 9254 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9255 if (FD && FD->getBuiltinID() == AbsKind) { 9256 EmitHeaderHint = false; 9257 } else { 9258 return; 9259 } 9260 } else if (!R.empty()) { 9261 return; 9262 } 9263 } 9264 } 9265 9266 S.Diag(Loc, diag::note_replace_abs_function) 9267 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9268 9269 if (!HeaderName) 9270 return; 9271 9272 if (!EmitHeaderHint) 9273 return; 9274 9275 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9276 << FunctionName; 9277 } 9278 9279 template <std::size_t StrLen> 9280 static bool IsStdFunction(const FunctionDecl *FDecl, 9281 const char (&Str)[StrLen]) { 9282 if (!FDecl) 9283 return false; 9284 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9285 return false; 9286 if (!FDecl->isInStdNamespace()) 9287 return false; 9288 9289 return true; 9290 } 9291 9292 // Warn when using the wrong abs() function. 9293 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9294 const FunctionDecl *FDecl) { 9295 if (Call->getNumArgs() != 1) 9296 return; 9297 9298 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9299 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9300 if (AbsKind == 0 && !IsStdAbs) 9301 return; 9302 9303 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9304 QualType ParamType = Call->getArg(0)->getType(); 9305 9306 // Unsigned types cannot be negative. Suggest removing the absolute value 9307 // function call. 
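// For illustration (hypothetical user code):
//
//   unsigned n = ...;
//   if (abs(n) > limit)   // warns: absolute value of an unsigned quantity;
//     ...                 // the note's fix-it removes the abs() call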
9308 if (ArgType->isUnsignedIntegerType()) { 9309 const char *FunctionName = 9310 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 9311 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 9312 Diag(Call->getExprLoc(), diag::note_remove_abs) 9313 << FunctionName 9314 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 9315 return; 9316 } 9317 9318 // Taking the absolute value of a pointer is very suspicious, they probably 9319 // wanted to index into an array, dereference a pointer, call a function, etc. 9320 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 9321 unsigned DiagType = 0; 9322 if (ArgType->isFunctionType()) 9323 DiagType = 1; 9324 else if (ArgType->isArrayType()) 9325 DiagType = 2; 9326 9327 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 9328 return; 9329 } 9330 9331 // std::abs has overloads which prevent most of the absolute value problems 9332 // from occurring. 9333 if (IsStdAbs) 9334 return; 9335 9336 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 9337 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 9338 9339 // The argument and parameter are the same kind. Check if they are the right 9340 // size. 9341 if (ArgValueKind == ParamValueKind) { 9342 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 9343 return; 9344 9345 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 9346 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 9347 << FDecl << ArgType << ParamType; 9348 9349 if (NewAbsKind == 0) 9350 return; 9351 9352 emitReplacement(*this, Call->getExprLoc(), 9353 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 9354 return; 9355 } 9356 9357 // ArgValueKind != ParamValueKind 9358 // The wrong type of absolute value function was used. Attempt to find the 9359 // proper one. 9360 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 9361 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 9362 if (NewAbsKind == 0) 9363 return; 9364 9365 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 9366 << FDecl << ParamValueKind << ArgValueKind; 9367 9368 emitReplacement(*this, Call->getExprLoc(), 9369 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 9370 } 9371 9372 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 9373 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 9374 const FunctionDecl *FDecl) { 9375 if (!Call || !FDecl) return; 9376 9377 // Ignore template specializations and macros. 9378 if (inTemplateInstantiation()) return; 9379 if (Call->getExprLoc().isMacroID()) return; 9380 9381 // Only care about the one template argument, two function parameter std::max 9382 if (Call->getNumArgs() != 2) return; 9383 if (!IsStdFunction(FDecl, "max")) return; 9384 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 9385 if (!ArgList) return; 9386 if (ArgList->size() != 1) return; 9387 9388 // Check that template type argument is unsigned integer. 9389 const auto& TA = ArgList->get(0); 9390 if (TA.getKind() != TemplateArgument::Type) return; 9391 QualType ArgType = TA.getAsType(); 9392 if (!ArgType->isUnsignedIntegerType()) return; 9393 9394 // See if either argument is a literal zero. 
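// For illustration (hypothetical user code): with the template argument
// deduced as an unsigned type, the zero branch can never be selected, so
//
//   std::max(0u, count);   // warns; the note's fix-it reduces it to (count)
//
// Only a literal zero (materialized as a temporary for the const T& parameter)
// on exactly one side is diagnosed.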
9395 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 9396 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 9397 if (!MTE) return false; 9398 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 9399 if (!Num) return false; 9400 if (Num->getValue() != 0) return false; 9401 return true; 9402 }; 9403 9404 const Expr *FirstArg = Call->getArg(0); 9405 const Expr *SecondArg = Call->getArg(1); 9406 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 9407 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 9408 9409 // Only warn when exactly one argument is zero. 9410 if (IsFirstArgZero == IsSecondArgZero) return; 9411 9412 SourceRange FirstRange = FirstArg->getSourceRange(); 9413 SourceRange SecondRange = SecondArg->getSourceRange(); 9414 9415 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 9416 9417 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 9418 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 9419 9420 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 9421 SourceRange RemovalRange; 9422 if (IsFirstArgZero) { 9423 RemovalRange = SourceRange(FirstRange.getBegin(), 9424 SecondRange.getBegin().getLocWithOffset(-1)); 9425 } else { 9426 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 9427 SecondRange.getEnd()); 9428 } 9429 9430 Diag(Call->getExprLoc(), diag::note_remove_max_call) 9431 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 9432 << FixItHint::CreateRemoval(RemovalRange); 9433 } 9434 9435 //===--- CHECK: Standard memory functions ---------------------------------===// 9436 9437 /// Takes the expression passed to the size_t parameter of functions 9438 /// such as memcmp, strncat, etc and warns if it's a comparison. 9439 /// 9440 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 9441 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 9442 IdentifierInfo *FnName, 9443 SourceLocation FnLoc, 9444 SourceLocation RParenLoc) { 9445 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 9446 if (!Size) 9447 return false; 9448 9449 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 9450 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 9451 return false; 9452 9453 SourceRange SizeRange = Size->getSourceRange(); 9454 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 9455 << SizeRange << FnName; 9456 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 9457 << FnName 9458 << FixItHint::CreateInsertion( 9459 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 9460 << FixItHint::CreateRemoval(RParenLoc); 9461 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 9462 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 9463 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 9464 ")"); 9465 9466 return true; 9467 } 9468 9469 /// Determine whether the given type is or contains a dynamic class type 9470 /// (e.g., whether it has a vtable). 9471 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 9472 bool &IsContained) { 9473 // Look through array types while ignoring qualifiers. 9474 const Type *Ty = T->getBaseElementTypeUnsafe(); 9475 IsContained = false; 9476 9477 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 9478 RD = RD ? RD->getDefinition() : nullptr; 9479 if (!RD || RD->isInvalidDecl()) 9480 return nullptr; 9481 9482 if (RD->isDynamicClass()) 9483 return RD; 9484 9485 // Check all the fields. 
If any bases were dynamic, the class is dynamic. 9486 // It's impossible for a class to transitively contain itself by value, so 9487 // infinite recursion is impossible. 9488 for (auto *FD : RD->fields()) { 9489 bool SubContained; 9490 if (const CXXRecordDecl *ContainedRD = 9491 getContainedDynamicClass(FD->getType(), SubContained)) { 9492 IsContained = true; 9493 return ContainedRD; 9494 } 9495 } 9496 9497 return nullptr; 9498 } 9499 9500 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 9501 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 9502 if (Unary->getKind() == UETT_SizeOf) 9503 return Unary; 9504 return nullptr; 9505 } 9506 9507 /// If E is a sizeof expression, returns its argument expression, 9508 /// otherwise returns NULL. 9509 static const Expr *getSizeOfExprArg(const Expr *E) { 9510 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9511 if (!SizeOf->isArgumentType()) 9512 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 9513 return nullptr; 9514 } 9515 9516 /// If E is a sizeof expression, returns its argument type. 9517 static QualType getSizeOfArgType(const Expr *E) { 9518 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 9519 return SizeOf->getTypeOfArgument(); 9520 return QualType(); 9521 } 9522 9523 namespace { 9524 9525 struct SearchNonTrivialToInitializeField 9526 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 9527 using Super = 9528 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 9529 9530 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 9531 9532 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 9533 SourceLocation SL) { 9534 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9535 asDerived().visitArray(PDIK, AT, SL); 9536 return; 9537 } 9538 9539 Super::visitWithKind(PDIK, FT, SL); 9540 } 9541 9542 void visitARCStrong(QualType FT, SourceLocation SL) { 9543 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9544 } 9545 void visitARCWeak(QualType FT, SourceLocation SL) { 9546 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 9547 } 9548 void visitStruct(QualType FT, SourceLocation SL) { 9549 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9550 visit(FD->getType(), FD->getLocation()); 9551 } 9552 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 9553 const ArrayType *AT, SourceLocation SL) { 9554 visit(getContext().getBaseElementType(AT), SL); 9555 } 9556 void visitTrivial(QualType FT, SourceLocation SL) {} 9557 9558 static void diag(QualType RT, const Expr *E, Sema &S) { 9559 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 9560 } 9561 9562 ASTContext &getContext() { return S.getASTContext(); } 9563 9564 const Expr *E; 9565 Sema &S; 9566 }; 9567 9568 struct SearchNonTrivialToCopyField 9569 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 9570 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 9571 9572 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 9573 9574 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 9575 SourceLocation SL) { 9576 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 9577 asDerived().visitArray(PCK, AT, SL); 9578 return; 9579 } 9580 9581 Super::visitWithKind(PCK, FT, SL); 9582 } 9583 9584 void visitARCStrong(QualType FT, SourceLocation SL) { 9585 S.DiagRuntimeBehavior(SL, E, 
S.PDiag(diag::note_nontrivial_field) << 0); 9586 } 9587 void visitARCWeak(QualType FT, SourceLocation SL) { 9588 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 9589 } 9590 void visitStruct(QualType FT, SourceLocation SL) { 9591 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 9592 visit(FD->getType(), FD->getLocation()); 9593 } 9594 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 9595 SourceLocation SL) { 9596 visit(getContext().getBaseElementType(AT), SL); 9597 } 9598 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 9599 SourceLocation SL) {} 9600 void visitTrivial(QualType FT, SourceLocation SL) {} 9601 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 9602 9603 static void diag(QualType RT, const Expr *E, Sema &S) { 9604 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 9605 } 9606 9607 ASTContext &getContext() { return S.getASTContext(); } 9608 9609 const Expr *E; 9610 Sema &S; 9611 }; 9612 9613 } 9614 9615 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 9616 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 9617 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 9618 9619 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 9620 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 9621 return false; 9622 9623 return doesExprLikelyComputeSize(BO->getLHS()) || 9624 doesExprLikelyComputeSize(BO->getRHS()); 9625 } 9626 9627 return getAsSizeOfExpr(SizeofExpr) != nullptr; 9628 } 9629 9630 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 9631 /// 9632 /// \code 9633 /// #define MACRO 0 9634 /// foo(MACRO); 9635 /// foo(0); 9636 /// \endcode 9637 /// 9638 /// This should return true for the first call to foo, but not for the second 9639 /// (regardless of whether foo is a macro or function). 9640 static bool isArgumentExpandedFromMacro(SourceManager &SM, 9641 SourceLocation CallLoc, 9642 SourceLocation ArgLoc) { 9643 if (!CallLoc.isMacroID()) 9644 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 9645 9646 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 9647 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 9648 } 9649 9650 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 9651 /// last two arguments transposed. 9652 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 9653 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 9654 return; 9655 9656 const Expr *SizeArg = 9657 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 9658 9659 auto isLiteralZero = [](const Expr *E) { 9660 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 9661 }; 9662 9663 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 9664 SourceLocation CallLoc = Call->getRParenLoc(); 9665 SourceManager &SM = S.getSourceManager(); 9666 if (isLiteralZero(SizeArg) && 9667 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 9668 9669 SourceLocation DiagLoc = SizeArg->getExprLoc(); 9670 9671 // Some platforms #define bzero to __builtin_memset. See if this is the 9672 // case, and if so, emit a better diagnostic. 
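// For illustration (hypothetical user code): a zero length is almost always a
// transposed-argument mistake.
//
//   memset(buf, sizeof(buf), 0);   // warns: the 'size' argument is zero
//   bzero(buf, 0);                 // warns with bzero-specific wording, even
//                                  // when bzero is a macro over memset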
9673 if (BId == Builtin::BIbzero || 9674 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 9675 CallLoc, SM, S.getLangOpts()) == "bzero")) { 9676 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 9677 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 9678 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 9679 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 9680 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 9681 } 9682 return; 9683 } 9684 9685 // If the second argument to a memset is a sizeof expression and the third 9686 // isn't, this is also likely an error. This should catch 9687 // 'memset(buf, sizeof(buf), 0xff)'. 9688 if (BId == Builtin::BImemset && 9689 doesExprLikelyComputeSize(Call->getArg(1)) && 9690 !doesExprLikelyComputeSize(Call->getArg(2))) { 9691 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 9692 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 9693 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 9694 return; 9695 } 9696 } 9697 9698 /// Check for dangerous or invalid arguments to memset(). 9699 /// 9700 /// This issues warnings on known problematic, dangerous or unspecified 9701 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 9702 /// function calls. 9703 /// 9704 /// \param Call The call expression to diagnose. 9705 void Sema::CheckMemaccessArguments(const CallExpr *Call, 9706 unsigned BId, 9707 IdentifierInfo *FnName) { 9708 assert(BId != 0); 9709 9710 // It is possible to have a non-standard definition of memset. Validate 9711 // we have enough arguments, and if not, abort further checking. 9712 unsigned ExpectedNumArgs = 9713 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 9714 if (Call->getNumArgs() < ExpectedNumArgs) 9715 return; 9716 9717 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 9718 BId == Builtin::BIstrndup ? 1 : 2); 9719 unsigned LenArg = 9720 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 9721 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 9722 9723 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 9724 Call->getBeginLoc(), Call->getRParenLoc())) 9725 return; 9726 9727 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 9728 CheckMemaccessSize(*this, BId, Call); 9729 9730 // We have special checking when the length is a sizeof expression. 9731 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 9732 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 9733 llvm::FoldingSetNodeID SizeOfArgID; 9734 9735 // Although widely used, 'bzero' is not a standard function. Be more strict 9736 // with the argument types before allowing diagnostics and only allow the 9737 // form bzero(ptr, sizeof(...)). 9738 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9739 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 9740 return; 9741 9742 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 9743 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 9744 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 9745 9746 QualType DestTy = Dest->getType(); 9747 QualType PointeeTy; 9748 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 9749 PointeeTy = DestPtrTy->getPointeeType(); 9750 9751 // Never warn about void type pointers. This can be used to suppress 9752 // false positives. 
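// For illustration (hypothetical user code): casting the pointer argument to
// void* is the supported way to silence these checks, and is what the
// note_bad_memaccess_silence fix-it emitted below inserts:
//
//   memset((void *)&polymorphicObj, 0, sizeof(polymorphicObj));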
9753 if (PointeeTy->isVoidType()) 9754 continue; 9755 9756 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 9757 // actually comparing the expressions for equality. Because computing the 9758 // expression IDs can be expensive, we only do this if the diagnostic is 9759 // enabled. 9760 if (SizeOfArg && 9761 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 9762 SizeOfArg->getExprLoc())) { 9763 // We only compute IDs for expressions if the warning is enabled, and 9764 // cache the sizeof arg's ID. 9765 if (SizeOfArgID == llvm::FoldingSetNodeID()) 9766 SizeOfArg->Profile(SizeOfArgID, Context, true); 9767 llvm::FoldingSetNodeID DestID; 9768 Dest->Profile(DestID, Context, true); 9769 if (DestID == SizeOfArgID) { 9770 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 9771 // over sizeof(src) as well. 9772 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 9773 StringRef ReadableName = FnName->getName(); 9774 9775 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 9776 if (UnaryOp->getOpcode() == UO_AddrOf) 9777 ActionIdx = 1; // If its an address-of operator, just remove it. 9778 if (!PointeeTy->isIncompleteType() && 9779 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 9780 ActionIdx = 2; // If the pointee's size is sizeof(char), 9781 // suggest an explicit length. 9782 9783 // If the function is defined as a builtin macro, do not show macro 9784 // expansion. 9785 SourceLocation SL = SizeOfArg->getExprLoc(); 9786 SourceRange DSR = Dest->getSourceRange(); 9787 SourceRange SSR = SizeOfArg->getSourceRange(); 9788 SourceManager &SM = getSourceManager(); 9789 9790 if (SM.isMacroArgExpansion(SL)) { 9791 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 9792 SL = SM.getSpellingLoc(SL); 9793 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 9794 SM.getSpellingLoc(DSR.getEnd())); 9795 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 9796 SM.getSpellingLoc(SSR.getEnd())); 9797 } 9798 9799 DiagRuntimeBehavior(SL, SizeOfArg, 9800 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 9801 << ReadableName 9802 << PointeeTy 9803 << DestTy 9804 << DSR 9805 << SSR); 9806 DiagRuntimeBehavior(SL, SizeOfArg, 9807 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 9808 << ActionIdx 9809 << SSR); 9810 9811 break; 9812 } 9813 } 9814 9815 // Also check for cases where the sizeof argument is the exact same 9816 // type as the memory argument, and where it points to a user-defined 9817 // record type. 9818 if (SizeOfArgTy != QualType()) { 9819 if (PointeeTy->isRecordType() && 9820 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 9821 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 9822 PDiag(diag::warn_sizeof_pointer_type_memaccess) 9823 << FnName << SizeOfArgTy << ArgIdx 9824 << PointeeTy << Dest->getSourceRange() 9825 << LenExpr->getSourceRange()); 9826 break; 9827 } 9828 } 9829 } else if (DestTy->isArrayType()) { 9830 PointeeTy = DestTy; 9831 } 9832 9833 if (PointeeTy == QualType()) 9834 continue; 9835 9836 // Always complain about dynamic classes. 9837 bool IsContained; 9838 if (const CXXRecordDecl *ContainedRD = 9839 getContainedDynamicClass(PointeeTy, IsContained)) { 9840 9841 unsigned OperationType = 0; 9842 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 9843 // "overwritten" if we're warning about the destination for any call 9844 // but memcmp; otherwise a verb appropriate to the call. 
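// A sketch of the OperationType encoding below: the destination of a
// non-comparison call keeps 0 (the "overwritten" wording); otherwise memcpy
// selects 1, memmove selects 2, and memcmp/bcmp select 3, so the diagnostic
// can use a verb appropriate to the call.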
9845 if (ArgIdx != 0 || IsCmp) { 9846 if (BId == Builtin::BImemcpy) 9847 OperationType = 1; 9848 else if(BId == Builtin::BImemmove) 9849 OperationType = 2; 9850 else if (IsCmp) 9851 OperationType = 3; 9852 } 9853 9854 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9855 PDiag(diag::warn_dyn_class_memaccess) 9856 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 9857 << IsContained << ContainedRD << OperationType 9858 << Call->getCallee()->getSourceRange()); 9859 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 9860 BId != Builtin::BImemset) 9861 DiagRuntimeBehavior( 9862 Dest->getExprLoc(), Dest, 9863 PDiag(diag::warn_arc_object_memaccess) 9864 << ArgIdx << FnName << PointeeTy 9865 << Call->getCallee()->getSourceRange()); 9866 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 9867 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 9868 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 9869 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9870 PDiag(diag::warn_cstruct_memaccess) 9871 << ArgIdx << FnName << PointeeTy << 0); 9872 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 9873 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 9874 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 9875 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 9876 PDiag(diag::warn_cstruct_memaccess) 9877 << ArgIdx << FnName << PointeeTy << 1); 9878 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 9879 } else { 9880 continue; 9881 } 9882 } else 9883 continue; 9884 9885 DiagRuntimeBehavior( 9886 Dest->getExprLoc(), Dest, 9887 PDiag(diag::note_bad_memaccess_silence) 9888 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 9889 break; 9890 } 9891 } 9892 9893 // A little helper routine: ignore addition and subtraction of integer literals. 9894 // This intentionally does not ignore all integer constant expressions because 9895 // we don't want to remove sizeof(). 9896 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 9897 Ex = Ex->IgnoreParenCasts(); 9898 9899 while (true) { 9900 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 9901 if (!BO || !BO->isAdditiveOp()) 9902 break; 9903 9904 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 9905 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 9906 9907 if (isa<IntegerLiteral>(RHS)) 9908 Ex = LHS; 9909 else if (isa<IntegerLiteral>(LHS)) 9910 Ex = RHS; 9911 else 9912 break; 9913 } 9914 9915 return Ex; 9916 } 9917 9918 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 9919 ASTContext &Context) { 9920 // Only handle constant-sized or VLAs, but not flexible members. 9921 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 9922 // Only issue the FIXIT for arrays of size > 1. 9923 if (CAT->getSize().getSExtValue() <= 1) 9924 return false; 9925 } else if (!Ty->isVariableArrayType()) { 9926 return false; 9927 } 9928 return true; 9929 } 9930 9931 // Warn if the user has made the 'size' argument to strlcpy or strlcat 9932 // be the size of the source, instead of the destination. 
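// For illustration (hypothetical user code):
//
//   char dst[32];
//   strlcpy(dst, src, sizeof(src));   // warns: size is taken from the source
//   strlcpy(dst, src, strlen(src));   // likewise
//
// When the destination is a constant-size array, the note's fix-it suggests
// 'sizeof(dst)' instead.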
9933 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 9934 IdentifierInfo *FnName) { 9935 9936 // Don't crash if the user has the wrong number of arguments 9937 unsigned NumArgs = Call->getNumArgs(); 9938 if ((NumArgs != 3) && (NumArgs != 4)) 9939 return; 9940 9941 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 9942 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 9943 const Expr *CompareWithSrc = nullptr; 9944 9945 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 9946 Call->getBeginLoc(), Call->getRParenLoc())) 9947 return; 9948 9949 // Look for 'strlcpy(dst, x, sizeof(x))' 9950 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 9951 CompareWithSrc = Ex; 9952 else { 9953 // Look for 'strlcpy(dst, x, strlen(x))' 9954 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 9955 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 9956 SizeCall->getNumArgs() == 1) 9957 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 9958 } 9959 } 9960 9961 if (!CompareWithSrc) 9962 return; 9963 9964 // Determine if the argument to sizeof/strlen is equal to the source 9965 // argument. In principle there's all kinds of things you could do 9966 // here, for instance creating an == expression and evaluating it with 9967 // EvaluateAsBooleanCondition, but this uses a more direct technique: 9968 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 9969 if (!SrcArgDRE) 9970 return; 9971 9972 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 9973 if (!CompareWithSrcDRE || 9974 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 9975 return; 9976 9977 const Expr *OriginalSizeArg = Call->getArg(2); 9978 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 9979 << OriginalSizeArg->getSourceRange() << FnName; 9980 9981 // Output a FIXIT hint if the destination is an array (rather than a 9982 // pointer to an array). This could be enhanced to handle some 9983 // pointers if we know the actual size, like if DstArg is 'array+2' 9984 // we could say 'sizeof(array)-2'. 9985 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 9986 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 9987 return; 9988 9989 SmallString<128> sizeString; 9990 llvm::raw_svector_ostream OS(sizeString); 9991 OS << "sizeof("; 9992 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 9993 OS << ")"; 9994 9995 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 9996 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 9997 OS.str()); 9998 } 9999 10000 /// Check if two expressions refer to the same declaration. 10001 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 10002 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 10003 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 10004 return D1->getDecl() == D2->getDecl(); 10005 return false; 10006 } 10007 10008 static const Expr *getStrlenExprArg(const Expr *E) { 10009 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 10010 const FunctionDecl *FD = CE->getDirectCallee(); 10011 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 10012 return nullptr; 10013 return CE->getArg(0)->IgnoreParenCasts(); 10014 } 10015 return nullptr; 10016 } 10017 10018 // Warn on anti-patterns as the 'size' argument to strncat. 
10019 // The correct size argument should look like following: 10020 // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); 10021 void Sema::CheckStrncatArguments(const CallExpr *CE, 10022 IdentifierInfo *FnName) { 10023 // Don't crash if the user has the wrong number of arguments. 10024 if (CE->getNumArgs() < 3) 10025 return; 10026 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 10027 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 10028 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 10029 10030 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 10031 CE->getRParenLoc())) 10032 return; 10033 10034 // Identify common expressions, which are wrongly used as the size argument 10035 // to strncat and may lead to buffer overflows. 10036 unsigned PatternType = 0; 10037 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 10038 // - sizeof(dst) 10039 if (referToTheSameDecl(SizeOfArg, DstArg)) 10040 PatternType = 1; 10041 // - sizeof(src) 10042 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 10043 PatternType = 2; 10044 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 10045 if (BE->getOpcode() == BO_Sub) { 10046 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 10047 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 10048 // - sizeof(dst) - strlen(dst) 10049 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 10050 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 10051 PatternType = 1; 10052 // - sizeof(src) - (anything) 10053 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 10054 PatternType = 2; 10055 } 10056 } 10057 10058 if (PatternType == 0) 10059 return; 10060 10061 // Generate the diagnostic. 10062 SourceLocation SL = LenArg->getBeginLoc(); 10063 SourceRange SR = LenArg->getSourceRange(); 10064 SourceManager &SM = getSourceManager(); 10065 10066 // If the function is defined as a builtin macro, do not show macro expansion. 10067 if (SM.isMacroArgExpansion(SL)) { 10068 SL = SM.getSpellingLoc(SL); 10069 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 10070 SM.getSpellingLoc(SR.getEnd())); 10071 } 10072 10073 // Check if the destination is an array (rather than a pointer to an array). 10074 QualType DstTy = DstArg->getType(); 10075 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 10076 Context); 10077 if (!isKnownSizeArray) { 10078 if (PatternType == 1) 10079 Diag(SL, diag::warn_strncat_wrong_size) << SR; 10080 else 10081 Diag(SL, diag::warn_strncat_src_size) << SR; 10082 return; 10083 } 10084 10085 if (PatternType == 1) 10086 Diag(SL, diag::warn_strncat_large_size) << SR; 10087 else 10088 Diag(SL, diag::warn_strncat_src_size) << SR; 10089 10090 SmallString<128> sizeString; 10091 llvm::raw_svector_ostream OS(sizeString); 10092 OS << "sizeof("; 10093 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10094 OS << ") - "; 10095 OS << "strlen("; 10096 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10097 OS << ") - 1"; 10098 10099 Diag(SL, diag::note_strncat_wrong_size) 10100 << FixItHint::CreateReplacement(SR, OS.str()); 10101 } 10102 10103 void 10104 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 10105 SourceLocation ReturnLoc, 10106 bool isObjCMethod, 10107 const AttrVec *Attrs, 10108 const FunctionDecl *FD) { 10109 // Check if the return value is null but should not be. 
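// For illustration (hypothetical user code):
//
//   __attribute__((returns_nonnull)) int *get() { return nullptr; }  // warns
//
// The same warning also fires (outside Objective-C methods) when the return
// type carries a _Nonnull nullability annotation.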
10110 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 10111 (!isObjCMethod && isNonNullType(Context, lhsType))) && 10112 CheckNonNullExpr(*this, RetValExp)) 10113 Diag(ReturnLoc, diag::warn_null_ret) 10114 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 10115 10116 // C++11 [basic.stc.dynamic.allocation]p4: 10117 // If an allocation function declared with a non-throwing 10118 // exception-specification fails to allocate storage, it shall return 10119 // a null pointer. Any other allocation function that fails to allocate 10120 // storage shall indicate failure only by throwing an exception [...] 10121 if (FD) { 10122 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 10123 if (Op == OO_New || Op == OO_Array_New) { 10124 const FunctionProtoType *Proto 10125 = FD->getType()->castAs<FunctionProtoType>(); 10126 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 10127 CheckNonNullExpr(*this, RetValExp)) 10128 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 10129 << FD << getLangOpts().CPlusPlus11; 10130 } 10131 } 10132 } 10133 10134 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 10135 10136 /// Check for comparisons of floating point operands using != and ==. 10137 /// Issue a warning if these are no self-comparisons, as they are not likely 10138 /// to do what the programmer intended. 10139 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 10140 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 10141 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 10142 10143 // Special case: check for x == x (which is OK). 10144 // Do not emit warnings for such cases. 10145 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 10146 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 10147 if (DRL->getDecl() == DRR->getDecl()) 10148 return; 10149 10150 // Special case: check for comparisons against literals that can be exactly 10151 // represented by APFloat. In such cases, do not emit a warning. This 10152 // is a heuristic: often comparison against such literals are used to 10153 // detect if a value in a variable has not changed. This clearly can 10154 // lead to false negatives. 10155 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 10156 if (FLL->isExact()) 10157 return; 10158 } else 10159 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 10160 if (FLR->isExact()) 10161 return; 10162 10163 // Check for comparisons with builtin types. 10164 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 10165 if (CL->getBuiltinCallee()) 10166 return; 10167 10168 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 10169 if (CR->getBuiltinCallee()) 10170 return; 10171 10172 // Emit the diagnostic. 10173 Diag(Loc, diag::warn_floatingpoint_eq) 10174 << LHS->getSourceRange() << RHS->getSourceRange(); 10175 } 10176 10177 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 10178 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 10179 10180 namespace { 10181 10182 /// Structure recording the 'active' range of an integer-valued 10183 /// expression. 10184 struct IntRange { 10185 /// The number of bits active in the int. Note that this includes exactly one 10186 /// sign bit if !NonNegative. 10187 unsigned Width; 10188 10189 /// True if the int is known not to have negative values. 
If so, all leading 10190 /// bits before Width are known zero, otherwise they are known to be the 10191 /// same as the MSB within Width. 10192 bool NonNegative; 10193 10194 IntRange(unsigned Width, bool NonNegative) 10195 : Width(Width), NonNegative(NonNegative) {} 10196 10197 /// Number of bits excluding the sign bit. 10198 unsigned valueBits() const { 10199 return NonNegative ? Width : Width - 1; 10200 } 10201 10202 /// Returns the range of the bool type. 10203 static IntRange forBoolType() { 10204 return IntRange(1, true); 10205 } 10206 10207 /// Returns the range of an opaque value of the given integral type. 10208 static IntRange forValueOfType(ASTContext &C, QualType T) { 10209 return forValueOfCanonicalType(C, 10210 T->getCanonicalTypeInternal().getTypePtr()); 10211 } 10212 10213 /// Returns the range of an opaque value of a canonical integral type. 10214 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10215 assert(T->isCanonicalUnqualified()); 10216 10217 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10218 T = VT->getElementType().getTypePtr(); 10219 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10220 T = CT->getElementType().getTypePtr(); 10221 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10222 T = AT->getValueType().getTypePtr(); 10223 10224 if (!C.getLangOpts().CPlusPlus) { 10225 // For enum types in C code, use the underlying datatype. 10226 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10227 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 10228 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 10229 // For enum types in C++, use the known bit width of the enumerators. 10230 EnumDecl *Enum = ET->getDecl(); 10231 // In C++11, enums can have a fixed underlying type. Use this type to 10232 // compute the range. 10233 if (Enum->isFixed()) { 10234 return IntRange(C.getIntWidth(QualType(T, 0)), 10235 !ET->isSignedIntegerOrEnumerationType()); 10236 } 10237 10238 unsigned NumPositive = Enum->getNumPositiveBits(); 10239 unsigned NumNegative = Enum->getNumNegativeBits(); 10240 10241 if (NumNegative == 0) 10242 return IntRange(NumPositive, true/*NonNegative*/); 10243 else 10244 return IntRange(std::max(NumPositive + 1, NumNegative), 10245 false/*NonNegative*/); 10246 } 10247 10248 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10249 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10250 10251 const BuiltinType *BT = cast<BuiltinType>(T); 10252 assert(BT->isInteger()); 10253 10254 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10255 } 10256 10257 /// Returns the "target" range of a canonical integral type, i.e. 10258 /// the range of values expressible in the type. 10259 /// 10260 /// This matches forValueOfCanonicalType except that enums have the 10261 /// full range of their type, not the range of their enumerators. 
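/// For illustration: in C++, given 'enum E { A = 0, B = 3 };' with no fixed
/// underlying type, forValueOfCanonicalType reports a 2-bit non-negative
/// range (just enough for the enumerators), while forTargetOfCanonicalType
/// reports the full range of the enum's underlying integer type.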
10262 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 10263 assert(T->isCanonicalUnqualified()); 10264 10265 if (const VectorType *VT = dyn_cast<VectorType>(T)) 10266 T = VT->getElementType().getTypePtr(); 10267 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 10268 T = CT->getElementType().getTypePtr(); 10269 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 10270 T = AT->getValueType().getTypePtr(); 10271 if (const EnumType *ET = dyn_cast<EnumType>(T)) 10272 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 10273 10274 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 10275 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 10276 10277 const BuiltinType *BT = cast<BuiltinType>(T); 10278 assert(BT->isInteger()); 10279 10280 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 10281 } 10282 10283 /// Returns the supremum of two ranges: i.e. their conservative merge. 10284 static IntRange join(IntRange L, IntRange R) { 10285 bool Unsigned = L.NonNegative && R.NonNegative; 10286 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 10287 L.NonNegative && R.NonNegative); 10288 } 10289 10290 /// Return the range of a bitwise-AND of the two ranges. 10291 static IntRange bit_and(IntRange L, IntRange R) { 10292 unsigned Bits = std::max(L.Width, R.Width); 10293 bool NonNegative = false; 10294 if (L.NonNegative) { 10295 Bits = std::min(Bits, L.Width); 10296 NonNegative = true; 10297 } 10298 if (R.NonNegative) { 10299 Bits = std::min(Bits, R.Width); 10300 NonNegative = true; 10301 } 10302 return IntRange(Bits, NonNegative); 10303 } 10304 10305 /// Return the range of a sum of the two ranges. 10306 static IntRange sum(IntRange L, IntRange R) { 10307 bool Unsigned = L.NonNegative && R.NonNegative; 10308 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 10309 Unsigned); 10310 } 10311 10312 /// Return the range of a difference of the two ranges. 10313 static IntRange difference(IntRange L, IntRange R) { 10314 // We need a 1-bit-wider range if: 10315 // 1) LHS can be negative: least value can be reduced. 10316 // 2) RHS can be negative: greatest value can be increased. 10317 bool CanWiden = !L.NonNegative || !R.NonNegative; 10318 bool Unsigned = L.NonNegative && R.Width == 0; 10319 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 10320 !Unsigned, 10321 Unsigned); 10322 } 10323 10324 /// Return the range of a product of the two ranges. 10325 static IntRange product(IntRange L, IntRange R) { 10326 // If both LHS and RHS can be negative, we can form 10327 // -2^L * -2^R = 2^(L + R) 10328 // which requires L + R + 1 value bits to represent. 10329 bool CanWiden = !L.NonNegative && !R.NonNegative; 10330 bool Unsigned = L.NonNegative && R.NonNegative; 10331 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 10332 Unsigned); 10333 } 10334 10335 /// Return the range of a remainder operation between the two ranges. 10336 static IntRange rem(IntRange L, IntRange R) { 10337 // The result of a remainder can't be larger than the result of 10338 // either side. The sign of the result is the sign of the LHS. 
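// A small worked example: if the LHS range covers all of 'int' (31 value bits,
// possibly negative) and the RHS range comes from an 'unsigned char' value
// (8 value bits, non-negative), the result gets min(31, 8) = 8 value bits plus
// a sign bit, because the sign follows the LHS.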
10339 bool Unsigned = L.NonNegative; 10340 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 10341 Unsigned); 10342 } 10343 }; 10344 10345 } // namespace 10346 10347 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 10348 unsigned MaxWidth) { 10349 if (value.isSigned() && value.isNegative()) 10350 return IntRange(value.getMinSignedBits(), false); 10351 10352 if (value.getBitWidth() > MaxWidth) 10353 value = value.trunc(MaxWidth); 10354 10355 // isNonNegative() just checks the sign bit without considering 10356 // signedness. 10357 return IntRange(value.getActiveBits(), true); 10358 } 10359 10360 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 10361 unsigned MaxWidth) { 10362 if (result.isInt()) 10363 return GetValueRange(C, result.getInt(), MaxWidth); 10364 10365 if (result.isVector()) { 10366 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 10367 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 10368 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 10369 R = IntRange::join(R, El); 10370 } 10371 return R; 10372 } 10373 10374 if (result.isComplexInt()) { 10375 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 10376 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 10377 return IntRange::join(R, I); 10378 } 10379 10380 // This can happen with lossless casts to intptr_t of "based" lvalues. 10381 // Assume it might use arbitrary bits. 10382 // FIXME: The only reason we need to pass the type in here is to get 10383 // the sign right on this one case. It would be nice if APValue 10384 // preserved this. 10385 assert(result.isLValue() || result.isAddrLabelDiff()); 10386 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 10387 } 10388 10389 static QualType GetExprType(const Expr *E) { 10390 QualType Ty = E->getType(); 10391 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 10392 Ty = AtomicRHS->getValueType(); 10393 return Ty; 10394 } 10395 10396 /// Pseudo-evaluate the given integer expression, estimating the 10397 /// range of values it might take. 10398 /// 10399 /// \param MaxWidth The width to which the value will be truncated. 10400 /// \param Approximate If \c true, return a likely range for the result: in 10401 /// particular, assume that aritmetic on narrower types doesn't leave 10402 /// those types. If \c false, return a range including all possible 10403 /// result values. 10404 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 10405 bool InConstantContext, bool Approximate) { 10406 E = E->IgnoreParens(); 10407 10408 // Try a full evaluation first. 10409 Expr::EvalResult result; 10410 if (E->EvaluateAsRValue(result, C, InConstantContext)) 10411 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 10412 10413 // I think we only want to look through implicit casts here; if the 10414 // user has an explicit widening cast, we should treat the value as 10415 // being of the new, wider type. 
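// For illustration: given 'short s', the implicit promotion in 's + 0' still
// yields a 16-bit range for s, whereas an explicit '(int)s' is treated as a
// full-width int and is not narrowed back down.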
10416 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 10417 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 10418 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 10419 Approximate); 10420 10421 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 10422 10423 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 10424 CE->getCastKind() == CK_BooleanToSignedIntegral; 10425 10426 // Assume that non-integer casts can span the full range of the type. 10427 if (!isIntegerCast) 10428 return OutputTypeRange; 10429 10430 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 10431 std::min(MaxWidth, OutputTypeRange.Width), 10432 InConstantContext, Approximate); 10433 10434 // Bail out if the subexpr's range is as wide as the cast type. 10435 if (SubRange.Width >= OutputTypeRange.Width) 10436 return OutputTypeRange; 10437 10438 // Otherwise, we take the smaller width, and we're non-negative if 10439 // either the output type or the subexpr is. 10440 return IntRange(SubRange.Width, 10441 SubRange.NonNegative || OutputTypeRange.NonNegative); 10442 } 10443 10444 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 10445 // If we can fold the condition, just take that operand. 10446 bool CondResult; 10447 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 10448 return GetExprRange(C, 10449 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 10450 MaxWidth, InConstantContext, Approximate); 10451 10452 // Otherwise, conservatively merge. 10453 // GetExprRange requires an integer expression, but a throw expression 10454 // results in a void type. 10455 Expr *E = CO->getTrueExpr(); 10456 IntRange L = E->getType()->isVoidType() 10457 ? IntRange{0, true} 10458 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 10459 E = CO->getFalseExpr(); 10460 IntRange R = E->getType()->isVoidType() 10461 ? IntRange{0, true} 10462 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 10463 return IntRange::join(L, R); 10464 } 10465 10466 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 10467 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 10468 10469 switch (BO->getOpcode()) { 10470 case BO_Cmp: 10471 llvm_unreachable("builtin <=> should have class type"); 10472 10473 // Boolean-valued operations are single-bit and positive. 10474 case BO_LAnd: 10475 case BO_LOr: 10476 case BO_LT: 10477 case BO_GT: 10478 case BO_LE: 10479 case BO_GE: 10480 case BO_EQ: 10481 case BO_NE: 10482 return IntRange::forBoolType(); 10483 10484 // The type of the assignments is the type of the LHS, so the RHS 10485 // is not necessarily the same type. 10486 case BO_MulAssign: 10487 case BO_DivAssign: 10488 case BO_RemAssign: 10489 case BO_AddAssign: 10490 case BO_SubAssign: 10491 case BO_XorAssign: 10492 case BO_OrAssign: 10493 // TODO: bitfields? 10494 return IntRange::forValueOfType(C, GetExprType(E)); 10495 10496 // Simple assignments just pass through the RHS, which will have 10497 // been coerced to the LHS type. 10498 case BO_Assign: 10499 // TODO: bitfields? 10500 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 10501 Approximate); 10502 10503 // Operations with opaque sources are black-listed. 10504 case BO_PtrMemD: 10505 case BO_PtrMemI: 10506 return IntRange::forValueOfType(C, GetExprType(E)); 10507 10508 // Bitwise-and uses the *infinum* of the two source ranges. 
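// (That is, the infimum: masking cannot widen either operand. For example,
// 'x & 0xFF' with a signed 32-bit x yields an 8-bit, non-negative range.)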
10509 case BO_And: 10510 case BO_AndAssign: 10511 Combine = IntRange::bit_and; 10512 break; 10513 10514 // Left shift gets black-listed based on a judgement call. 10515 case BO_Shl: 10516 // ...except that we want to treat '1 << (blah)' as logically 10517 // positive. It's an important idiom. 10518 if (IntegerLiteral *I 10519 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 10520 if (I->getValue() == 1) { 10521 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 10522 return IntRange(R.Width, /*NonNegative*/ true); 10523 } 10524 } 10525 LLVM_FALLTHROUGH; 10526 10527 case BO_ShlAssign: 10528 return IntRange::forValueOfType(C, GetExprType(E)); 10529 10530 // Right shift by a constant can narrow its left argument. 10531 case BO_Shr: 10532 case BO_ShrAssign: { 10533 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 10534 Approximate); 10535 10536 // If the shift amount is a positive constant, drop the width by 10537 // that much. 10538 if (Optional<llvm::APSInt> shift = 10539 BO->getRHS()->getIntegerConstantExpr(C)) { 10540 if (shift->isNonNegative()) { 10541 unsigned zext = shift->getZExtValue(); 10542 if (zext >= L.Width) 10543 L.Width = (L.NonNegative ? 0 : 1); 10544 else 10545 L.Width -= zext; 10546 } 10547 } 10548 10549 return L; 10550 } 10551 10552 // Comma acts as its right operand. 10553 case BO_Comma: 10554 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 10555 Approximate); 10556 10557 case BO_Add: 10558 if (!Approximate) 10559 Combine = IntRange::sum; 10560 break; 10561 10562 case BO_Sub: 10563 if (BO->getLHS()->getType()->isPointerType()) 10564 return IntRange::forValueOfType(C, GetExprType(E)); 10565 if (!Approximate) 10566 Combine = IntRange::difference; 10567 break; 10568 10569 case BO_Mul: 10570 if (!Approximate) 10571 Combine = IntRange::product; 10572 break; 10573 10574 // The width of a division result is mostly determined by the size 10575 // of the LHS. 10576 case BO_Div: { 10577 // Don't 'pre-truncate' the operands. 10578 unsigned opWidth = C.getIntWidth(GetExprType(E)); 10579 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 10580 Approximate); 10581 10582 // If the divisor is constant, use that. 10583 if (Optional<llvm::APSInt> divisor = 10584 BO->getRHS()->getIntegerConstantExpr(C)) { 10585 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 10586 if (log2 >= L.Width) 10587 L.Width = (L.NonNegative ? 0 : 1); 10588 else 10589 L.Width = std::min(L.Width - log2, MaxWidth); 10590 return L; 10591 } 10592 10593 // Otherwise, just use the LHS's width. 10594 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 10595 // could be -1. 10596 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 10597 Approximate); 10598 return IntRange(L.Width, L.NonNegative && R.NonNegative); 10599 } 10600 10601 case BO_Rem: 10602 Combine = IntRange::rem; 10603 break; 10604 10605 // The default behavior is okay for these. 10606 case BO_Xor: 10607 case BO_Or: 10608 break; 10609 } 10610 10611 // Combine the two ranges, but limit the result to the type in which we 10612 // performed the computation. 
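// For illustration: adding two 8-bit non-negative ranges with Combine set to
// IntRange::sum gives a 9-bit non-negative range; the code below evaluates
// both operands at the width of the expression's type ('int' after promotion)
// and then clamps the combined width to the caller's MaxWidth.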
10613 QualType T = GetExprType(E); 10614 unsigned opWidth = C.getIntWidth(T); 10615 IntRange L = 10616 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 10617 IntRange R = 10618 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 10619 IntRange C = Combine(L, R); 10620 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 10621 C.Width = std::min(C.Width, MaxWidth); 10622 return C; 10623 } 10624 10625 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 10626 switch (UO->getOpcode()) { 10627 // Boolean-valued operations are white-listed. 10628 case UO_LNot: 10629 return IntRange::forBoolType(); 10630 10631 // Operations with opaque sources are black-listed. 10632 case UO_Deref: 10633 case UO_AddrOf: // should be impossible 10634 return IntRange::forValueOfType(C, GetExprType(E)); 10635 10636 default: 10637 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 10638 Approximate); 10639 } 10640 } 10641 10642 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 10643 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 10644 Approximate); 10645 10646 if (const auto *BitField = E->getSourceBitField()) 10647 return IntRange(BitField->getBitWidthValue(C), 10648 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 10649 10650 return IntRange::forValueOfType(C, GetExprType(E)); 10651 } 10652 10653 static IntRange GetExprRange(ASTContext &C, const Expr *E, 10654 bool InConstantContext, bool Approximate) { 10655 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 10656 Approximate); 10657 } 10658 10659 /// Checks whether the given value, which currently has the given 10660 /// source semantics, has the same value when coerced through the 10661 /// target semantics. 10662 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 10663 const llvm::fltSemantics &Src, 10664 const llvm::fltSemantics &Tgt) { 10665 llvm::APFloat truncated = value; 10666 10667 bool ignored; 10668 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 10669 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 10670 10671 return truncated.bitwiseIsEqual(value); 10672 } 10673 10674 /// Checks whether the given value, which currently has the given 10675 /// source semantics, has the same value when coerced through the 10676 /// target semantics. 10677 /// 10678 /// The value might be a vector of floats (or a complex number). 10679 static bool IsSameFloatAfterCast(const APValue &value, 10680 const llvm::fltSemantics &Src, 10681 const llvm::fltSemantics &Tgt) { 10682 if (value.isFloat()) 10683 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 10684 10685 if (value.isVector()) { 10686 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 10687 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 10688 return false; 10689 return true; 10690 } 10691 10692 assert(value.isComplexFloat()); 10693 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 10694 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 10695 } 10696 10697 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 10698 bool IsListInit = false); 10699 10700 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 10701 // Suppress cases where we are comparing against an enum constant. 
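  // e.g. a range check like 'x <= kMaxValue' written against an enumerator is
  // usually deliberate, even if it happens to be tautological today.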
10702 if (const DeclRefExpr *DR = 10703 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 10704 if (isa<EnumConstantDecl>(DR->getDecl())) 10705 return true; 10706 10707 // Suppress cases where the value is expanded from a macro, unless that macro 10708 // is how a language represents a boolean literal. This is the case in both C 10709 // and Objective-C. 10710 SourceLocation BeginLoc = E->getBeginLoc(); 10711 if (BeginLoc.isMacroID()) { 10712 StringRef MacroName = Lexer::getImmediateMacroName( 10713 BeginLoc, S.getSourceManager(), S.getLangOpts()); 10714 return MacroName != "YES" && MacroName != "NO" && 10715 MacroName != "true" && MacroName != "false"; 10716 } 10717 10718 return false; 10719 } 10720 10721 static bool isKnownToHaveUnsignedValue(Expr *E) { 10722 return E->getType()->isIntegerType() && 10723 (!E->getType()->isSignedIntegerType() || 10724 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 10725 } 10726 10727 namespace { 10728 /// The promoted range of values of a type. In general this has the 10729 /// following structure: 10730 /// 10731 /// |-----------| . . . |-----------| 10732 /// ^ ^ ^ ^ 10733 /// Min HoleMin HoleMax Max 10734 /// 10735 /// ... where there is only a hole if a signed type is promoted to unsigned 10736 /// (in which case Min and Max are the smallest and largest representable 10737 /// values). 10738 struct PromotedRange { 10739 // Min, or HoleMax if there is a hole. 10740 llvm::APSInt PromotedMin; 10741 // Max, or HoleMin if there is a hole. 10742 llvm::APSInt PromotedMax; 10743 10744 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 10745 if (R.Width == 0) 10746 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 10747 else if (R.Width >= BitWidth && !Unsigned) { 10748 // Promotion made the type *narrower*. This happens when promoting 10749 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 10750 // Treat all values of 'signed int' as being in range for now. 10751 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 10752 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 10753 } else { 10754 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 10755 .extOrTrunc(BitWidth); 10756 PromotedMin.setIsUnsigned(Unsigned); 10757 10758 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 10759 .extOrTrunc(BitWidth); 10760 PromotedMax.setIsUnsigned(Unsigned); 10761 } 10762 } 10763 10764 // Determine whether this range is contiguous (has no hole). 10765 bool isContiguous() const { return PromotedMin <= PromotedMax; } 10766 10767 // Where a constant value is within the range. 
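  // The result is a bitmask of the relations that are known to hold between
  // the constant and every value in the range; e.g. 'Less' below is
  // LE | LT | NE.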
10768 enum ComparisonResult { 10769 LT = 0x1, 10770 LE = 0x2, 10771 GT = 0x4, 10772 GE = 0x8, 10773 EQ = 0x10, 10774 NE = 0x20, 10775 InRangeFlag = 0x40, 10776 10777 Less = LE | LT | NE, 10778 Min = LE | InRangeFlag, 10779 InRange = InRangeFlag, 10780 Max = GE | InRangeFlag, 10781 Greater = GE | GT | NE, 10782 10783 OnlyValue = LE | GE | EQ | InRangeFlag, 10784 InHole = NE 10785 }; 10786 10787 ComparisonResult compare(const llvm::APSInt &Value) const { 10788 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 10789 Value.isUnsigned() == PromotedMin.isUnsigned()); 10790 if (!isContiguous()) { 10791 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 10792 if (Value.isMinValue()) return Min; 10793 if (Value.isMaxValue()) return Max; 10794 if (Value >= PromotedMin) return InRange; 10795 if (Value <= PromotedMax) return InRange; 10796 return InHole; 10797 } 10798 10799 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 10800 case -1: return Less; 10801 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 10802 case 1: 10803 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 10804 case -1: return InRange; 10805 case 0: return Max; 10806 case 1: return Greater; 10807 } 10808 } 10809 10810 llvm_unreachable("impossible compare result"); 10811 } 10812 10813 static llvm::Optional<StringRef> 10814 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 10815 if (Op == BO_Cmp) { 10816 ComparisonResult LTFlag = LT, GTFlag = GT; 10817 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 10818 10819 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 10820 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 10821 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 10822 return llvm::None; 10823 } 10824 10825 ComparisonResult TrueFlag, FalseFlag; 10826 if (Op == BO_EQ) { 10827 TrueFlag = EQ; 10828 FalseFlag = NE; 10829 } else if (Op == BO_NE) { 10830 TrueFlag = NE; 10831 FalseFlag = EQ; 10832 } else { 10833 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 10834 TrueFlag = LT; 10835 FalseFlag = GE; 10836 } else { 10837 TrueFlag = GT; 10838 FalseFlag = LE; 10839 } 10840 if (Op == BO_GE || Op == BO_LE) 10841 std::swap(TrueFlag, FalseFlag); 10842 } 10843 if (R & TrueFlag) 10844 return StringRef("true"); 10845 if (R & FalseFlag) 10846 return StringRef("false"); 10847 return llvm::None; 10848 } 10849 }; 10850 } 10851 10852 static bool HasEnumType(Expr *E) { 10853 // Strip off implicit integral promotions. 10854 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 10855 if (ICE->getCastKind() != CK_IntegralCast && 10856 ICE->getCastKind() != CK_NoOp) 10857 break; 10858 E = ICE->getSubExpr(); 10859 } 10860 10861 return E->getType()->isEnumeralType(); 10862 } 10863 10864 static int classifyConstantValue(Expr *Constant) { 10865 // The values of this enumeration are used in the diagnostics 10866 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 10867 enum ConstantValueKind { 10868 Miscellaneous = 0, 10869 LiteralTrue, 10870 LiteralFalse 10871 }; 10872 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 10873 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //  - If the constant is outside the range of representable values of
  //    the enumeration. In such a case, we should warn about the cast
  //    to enumeration type, not about the comparison.
  //  - If the constant is the maximum / minimum in-range value. For an
  //    enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it is a typedef for a signed
  // char (namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator.
We don't want to diagnose 10953 // 10954 // some_long_value <= INT_MAX 10955 // 10956 // when sizeof(int) == sizeof(long). 10957 bool InRange = Cmp & PromotedRange::InRangeFlag; 10958 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 10959 return false; 10960 10961 // A comparison of an unsigned bit-field against 0 is really a type problem, 10962 // even though at the type level the bit-field might promote to 'signed int'. 10963 if (Other->refersToBitField() && InRange && Value == 0 && 10964 Other->getType()->isUnsignedIntegerOrEnumerationType()) 10965 TautologicalTypeCompare = true; 10966 10967 // If this is a comparison to an enum constant, include that 10968 // constant in the diagnostic. 10969 const EnumConstantDecl *ED = nullptr; 10970 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 10971 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 10972 10973 // Should be enough for uint128 (39 decimal digits) 10974 SmallString<64> PrettySourceValue; 10975 llvm::raw_svector_ostream OS(PrettySourceValue); 10976 if (ED) { 10977 OS << '\'' << *ED << "' (" << Value << ")"; 10978 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 10979 Constant->IgnoreParenImpCasts())) { 10980 OS << (BL->getValue() ? "YES" : "NO"); 10981 } else { 10982 OS << Value; 10983 } 10984 10985 if (!TautologicalTypeCompare) { 10986 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 10987 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 10988 << E->getOpcodeStr() << OS.str() << *Result 10989 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 10990 return true; 10991 } 10992 10993 if (IsObjCSignedCharBool) { 10994 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 10995 S.PDiag(diag::warn_tautological_compare_objc_bool) 10996 << OS.str() << *Result); 10997 return true; 10998 } 10999 11000 // FIXME: We use a somewhat different formatting for the in-range cases and 11001 // cases involving boolean values for historical reasons. We should pick a 11002 // consistent way of presenting these diagnostics. 11003 if (!InRange || Other->isKnownToHaveBooleanValue()) { 11004 11005 S.DiagRuntimeBehavior( 11006 E->getOperatorLoc(), E, 11007 S.PDiag(!InRange ? diag::warn_out_of_range_compare 11008 : diag::warn_tautological_bool_compare) 11009 << OS.str() << classifyConstantValue(Constant) << OtherT 11010 << OtherIsBooleanDespiteType << *Result 11011 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 11012 } else { 11013 unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 11014 ? (HasEnumType(OriginalOther) 11015 ? diag::warn_unsigned_enum_always_true_comparison 11016 : diag::warn_unsigned_always_true_comparison) 11017 : diag::warn_tautological_constant_compare; 11018 11019 S.Diag(E->getOperatorLoc(), Diag) 11020 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 11021 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11022 } 11023 11024 return true; 11025 } 11026 11027 /// Analyze the operands of the given comparison. Implements the 11028 /// fallback case from AnalyzeComparison. 11029 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 11030 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11031 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11032 } 11033 11034 /// Implements -Wsign-compare. 
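///
/// For example:
/// \code
///   int i = -1;
///   unsigned u = 1;
///   if (i < u) { ... } // false: 'i' converts to a large unsigned value.
/// \endcode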
11035 /// 11036 /// \param E the binary operator to check for warnings 11037 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 11038 // The type the comparison is being performed in. 11039 QualType T = E->getLHS()->getType(); 11040 11041 // Only analyze comparison operators where both sides have been converted to 11042 // the same type. 11043 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 11044 return AnalyzeImpConvsInComparison(S, E); 11045 11046 // Don't analyze value-dependent comparisons directly. 11047 if (E->isValueDependent()) 11048 return AnalyzeImpConvsInComparison(S, E); 11049 11050 Expr *LHS = E->getLHS(); 11051 Expr *RHS = E->getRHS(); 11052 11053 if (T->isIntegralType(S.Context)) { 11054 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 11055 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 11056 11057 // We don't care about expressions whose result is a constant. 11058 if (RHSValue && LHSValue) 11059 return AnalyzeImpConvsInComparison(S, E); 11060 11061 // We only care about expressions where just one side is literal 11062 if ((bool)RHSValue ^ (bool)LHSValue) { 11063 // Is the constant on the RHS or LHS? 11064 const bool RhsConstant = (bool)RHSValue; 11065 Expr *Const = RhsConstant ? RHS : LHS; 11066 Expr *Other = RhsConstant ? LHS : RHS; 11067 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 11068 11069 // Check whether an integer constant comparison results in a value 11070 // of 'true' or 'false'. 11071 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 11072 return AnalyzeImpConvsInComparison(S, E); 11073 } 11074 } 11075 11076 if (!T->hasUnsignedIntegerRepresentation()) { 11077 // We don't do anything special if this isn't an unsigned integral 11078 // comparison: we're only interested in integral comparisons, and 11079 // signed comparisons only happen in cases we don't care to warn about. 11080 return AnalyzeImpConvsInComparison(S, E); 11081 } 11082 11083 LHS = LHS->IgnoreParenImpCasts(); 11084 RHS = RHS->IgnoreParenImpCasts(); 11085 11086 if (!S.getLangOpts().CPlusPlus) { 11087 // Avoid warning about comparison of integers with different signs when 11088 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 11089 // the type of `E`. 11090 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 11091 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11092 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 11093 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11094 } 11095 11096 // Check to see if one of the (unmodified) operands is of different 11097 // signedness. 11098 Expr *signedOperand, *unsignedOperand; 11099 if (LHS->getType()->hasSignedIntegerRepresentation()) { 11100 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 11101 "unsigned comparison between two signed integer expressions?"); 11102 signedOperand = LHS; 11103 unsignedOperand = RHS; 11104 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 11105 signedOperand = RHS; 11106 unsignedOperand = LHS; 11107 } else { 11108 return AnalyzeImpConvsInComparison(S, E); 11109 } 11110 11111 // Otherwise, calculate the effective range of the signed operand. 11112 IntRange signedRange = GetExprRange( 11113 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 11114 11115 // Go ahead and analyze implicit conversions in the operands. Note 11116 // that we skip the implicit conversions on both sides. 
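  // (LHS and RHS already have their outermost implicit casts stripped, so only
  // conversions nested inside the operands are re-analyzed here.)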
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with an overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
                     /*Approximate*/ true);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}

/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contains only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  if (Bitfield->getType()->isBooleanType())
    return false;

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
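      // e.g. 'enum E { A = -1, B };' has a negative enumerator, so it reads as
      // an enum that was meant to be signed.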
11202 bool SignedEnum = ED->getNumNegativeBits() > 0; 11203 11204 // Check for surprising sign changes when assigning enum values to a 11205 // bitfield of different signedness. If the bitfield is signed and we 11206 // have exactly the right number of bits to store this unsigned enum, 11207 // suggest changing the enum to an unsigned type. This typically happens 11208 // on Windows where unfixed enums always use an underlying type of 'int'. 11209 unsigned DiagID = 0; 11210 if (SignedEnum && !SignedBitfield) { 11211 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 11212 } else if (SignedBitfield && !SignedEnum && 11213 ED->getNumPositiveBits() == FieldWidth) { 11214 DiagID = diag::warn_signed_bitfield_enum_conversion; 11215 } 11216 11217 if (DiagID) { 11218 S.Diag(InitLoc, DiagID) << Bitfield << ED; 11219 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 11220 SourceRange TypeRange = 11221 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 11222 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 11223 << SignedEnum << TypeRange; 11224 } 11225 11226 // Compute the required bitwidth. If the enum has negative values, we need 11227 // one more bit than the normal number of positive bits to represent the 11228 // sign bit. 11229 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 11230 ED->getNumNegativeBits()) 11231 : ED->getNumPositiveBits(); 11232 11233 // Check the bitwidth. 11234 if (BitsNeeded > FieldWidth) { 11235 Expr *WidthExpr = Bitfield->getBitWidth(); 11236 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 11237 << Bitfield << ED; 11238 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 11239 << BitsNeeded << ED << WidthExpr->getSourceRange(); 11240 } 11241 } 11242 11243 return false; 11244 } 11245 11246 llvm::APSInt Value = Result.Val.getInt(); 11247 11248 unsigned OriginalWidth = Value.getBitWidth(); 11249 11250 if (!Value.isSigned() || Value.isNegative()) 11251 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 11252 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 11253 OriginalWidth = Value.getMinSignedBits(); 11254 11255 if (OriginalWidth <= FieldWidth) 11256 return false; 11257 11258 // Compute the value which the bitfield will contain. 11259 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 11260 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 11261 11262 // Check whether the stored value is equal to the original value. 11263 TruncatedValue = TruncatedValue.extend(OriginalWidth); 11264 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 11265 return false; 11266 11267 // Special-case bitfields of width 1: booleans are naturally 0/1, and 11268 // therefore don't strictly fit into a signed bitfield of width 1. 11269 if (FieldWidth == 1 && Value == 1) 11270 return false; 11271 11272 std::string PrettyValue = Value.toString(10); 11273 std::string PrettyTrunc = TruncatedValue.toString(10); 11274 11275 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 11276 << PrettyValue << PrettyTrunc << OriginalInit->getType() 11277 << Init->getSourceRange(); 11278 11279 return true; 11280 } 11281 11282 /// Analyze the given simple or compound assignment for warning-worthy 11283 /// operations. 11284 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 11285 // Just recurse on the LHS. 
11286 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11287 11288 // We want to recurse on the RHS as normal unless we're assigning to 11289 // a bitfield. 11290 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 11291 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 11292 E->getOperatorLoc())) { 11293 // Recurse, ignoring any implicit conversions on the RHS. 11294 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 11295 E->getOperatorLoc()); 11296 } 11297 } 11298 11299 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11300 11301 // Diagnose implicitly sequentially-consistent atomic assignment. 11302 if (E->getLHS()->getType()->isAtomicType()) 11303 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 11304 } 11305 11306 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11307 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 11308 SourceLocation CContext, unsigned diag, 11309 bool pruneControlFlow = false) { 11310 if (pruneControlFlow) { 11311 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11312 S.PDiag(diag) 11313 << SourceType << T << E->getSourceRange() 11314 << SourceRange(CContext)); 11315 return; 11316 } 11317 S.Diag(E->getExprLoc(), diag) 11318 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 11319 } 11320 11321 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 11322 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 11323 SourceLocation CContext, 11324 unsigned diag, bool pruneControlFlow = false) { 11325 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 11326 } 11327 11328 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 11329 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 11330 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 11331 } 11332 11333 static void adornObjCBoolConversionDiagWithTernaryFixit( 11334 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 11335 Expr *Ignored = SourceExpr->IgnoreImplicit(); 11336 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 11337 Ignored = OVE->getSourceExpr(); 11338 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 11339 isa<BinaryOperator>(Ignored) || 11340 isa<CXXOperatorCallExpr>(Ignored); 11341 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 11342 if (NeedsParens) 11343 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 11344 << FixItHint::CreateInsertion(EndLoc, ")"); 11345 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 11346 } 11347 11348 /// Diagnose an implicit cast from a floating point value to an integer value. 
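///
/// For example:
/// \code
///   int i = 0.5;    // value silently truncates to 0
///   char c = 1e10;  // integral part is not representable in 'char'
/// \endcode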
11349 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 11350 SourceLocation CContext) { 11351 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 11352 const bool PruneWarnings = S.inTemplateInstantiation(); 11353 11354 Expr *InnerE = E->IgnoreParenImpCasts(); 11355 // We also want to warn on, e.g., "int i = -1.234" 11356 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 11357 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 11358 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 11359 11360 const bool IsLiteral = 11361 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 11362 11363 llvm::APFloat Value(0.0); 11364 bool IsConstant = 11365 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 11366 if (!IsConstant) { 11367 if (isObjCSignedCharBool(S, T)) { 11368 return adornObjCBoolConversionDiagWithTernaryFixit( 11369 S, E, 11370 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 11371 << E->getType()); 11372 } 11373 11374 return DiagnoseImpCast(S, E, T, CContext, 11375 diag::warn_impcast_float_integer, PruneWarnings); 11376 } 11377 11378 bool isExact = false; 11379 11380 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 11381 T->hasUnsignedIntegerRepresentation()); 11382 llvm::APFloat::opStatus Result = Value.convertToInteger( 11383 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 11384 11385 // FIXME: Force the precision of the source value down so we don't print 11386 // digits which are usually useless (we don't really care here if we 11387 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 11388 // would automatically print the shortest representation, but it's a bit 11389 // tricky to implement. 11390 SmallString<16> PrettySourceValue; 11391 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 11392 precision = (precision * 59 + 195) / 196; 11393 Value.toString(PrettySourceValue, precision); 11394 11395 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 11396 return adornObjCBoolConversionDiagWithTernaryFixit( 11397 S, E, 11398 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 11399 << PrettySourceValue); 11400 } 11401 11402 if (Result == llvm::APFloat::opOK && isExact) { 11403 if (IsLiteral) return; 11404 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 11405 PruneWarnings); 11406 } 11407 11408 // Conversion of a floating-point value to a non-bool integer where the 11409 // integral part cannot be represented by the integer type is undefined. 11410 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 11411 return DiagnoseImpCast( 11412 S, E, T, CContext, 11413 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 11414 : diag::warn_impcast_float_to_integer_out_of_range, 11415 PruneWarnings); 11416 11417 unsigned DiagID = 0; 11418 if (IsLiteral) { 11419 // Warn on floating point literal to integer. 11420 DiagID = diag::warn_impcast_literal_float_to_integer; 11421 } else if (IntegerValue == 0) { 11422 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 11423 return DiagnoseImpCast(S, E, T, CContext, 11424 diag::warn_impcast_float_integer, PruneWarnings); 11425 } 11426 // Warn on non-zero to zero conversion. 
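    // e.g. 'int i = 3.0 / 4.0;' folds to 0.75 and becomes 0 here.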
11427 DiagID = diag::warn_impcast_float_to_integer_zero; 11428 } else { 11429 if (IntegerValue.isUnsigned()) { 11430 if (!IntegerValue.isMaxValue()) { 11431 return DiagnoseImpCast(S, E, T, CContext, 11432 diag::warn_impcast_float_integer, PruneWarnings); 11433 } 11434 } else { // IntegerValue.isSigned() 11435 if (!IntegerValue.isMaxSignedValue() && 11436 !IntegerValue.isMinSignedValue()) { 11437 return DiagnoseImpCast(S, E, T, CContext, 11438 diag::warn_impcast_float_integer, PruneWarnings); 11439 } 11440 } 11441 // Warn on evaluatable floating point expression to integer conversion. 11442 DiagID = diag::warn_impcast_float_to_integer; 11443 } 11444 11445 SmallString<16> PrettyTargetValue; 11446 if (IsBool) 11447 PrettyTargetValue = Value.isZero() ? "false" : "true"; 11448 else 11449 IntegerValue.toString(PrettyTargetValue); 11450 11451 if (PruneWarnings) { 11452 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11453 S.PDiag(DiagID) 11454 << E->getType() << T.getUnqualifiedType() 11455 << PrettySourceValue << PrettyTargetValue 11456 << E->getSourceRange() << SourceRange(CContext)); 11457 } else { 11458 S.Diag(E->getExprLoc(), DiagID) 11459 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 11460 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 11461 } 11462 } 11463 11464 /// Analyze the given compound assignment for the possible losing of 11465 /// floating-point precision. 11466 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 11467 assert(isa<CompoundAssignOperator>(E) && 11468 "Must be compound assignment operation"); 11469 // Recurse on the LHS and RHS in here 11470 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11471 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11472 11473 if (E->getLHS()->getType()->isAtomicType()) 11474 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 11475 11476 // Now check the outermost expression 11477 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 11478 const auto *RBT = cast<CompoundAssignOperator>(E) 11479 ->getComputationResultType() 11480 ->getAs<BuiltinType>(); 11481 11482 // The below checks assume source is floating point. 11483 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 11484 11485 // If source is floating point but target is an integer. 11486 if (ResultBT->isInteger()) 11487 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 11488 E->getExprLoc(), diag::warn_impcast_float_integer); 11489 11490 if (!ResultBT->isFloatingPoint()) 11491 return; 11492 11493 // If both source and target are floating points, warn about losing precision. 11494 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11495 QualType(ResultBT, 0), QualType(RBT, 0)); 11496 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 11497 // warn about dropping FP rank. 
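    // e.g. 'float f; f += 2.5;' computes in 'double' and then truncates the
    // result back down to 'float'.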
11498 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 11499 diag::warn_impcast_float_result_precision); 11500 } 11501 11502 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 11503 IntRange Range) { 11504 if (!Range.Width) return "0"; 11505 11506 llvm::APSInt ValueInRange = Value; 11507 ValueInRange.setIsSigned(!Range.NonNegative); 11508 ValueInRange = ValueInRange.trunc(Range.Width); 11509 return ValueInRange.toString(10); 11510 } 11511 11512 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 11513 if (!isa<ImplicitCastExpr>(Ex)) 11514 return false; 11515 11516 Expr *InnerE = Ex->IgnoreParenImpCasts(); 11517 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 11518 const Type *Source = 11519 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 11520 if (Target->isDependentType()) 11521 return false; 11522 11523 const BuiltinType *FloatCandidateBT = 11524 dyn_cast<BuiltinType>(ToBool ? Source : Target); 11525 const Type *BoolCandidateType = ToBool ? Target : Source; 11526 11527 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 11528 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 11529 } 11530 11531 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 11532 SourceLocation CC) { 11533 unsigned NumArgs = TheCall->getNumArgs(); 11534 for (unsigned i = 0; i < NumArgs; ++i) { 11535 Expr *CurrA = TheCall->getArg(i); 11536 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 11537 continue; 11538 11539 bool IsSwapped = ((i > 0) && 11540 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 11541 IsSwapped |= ((i < (NumArgs - 1)) && 11542 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 11543 if (IsSwapped) { 11544 // Warn on this floating-point to bool conversion. 11545 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 11546 CurrA->getType(), CC, 11547 diag::warn_impcast_floating_point_to_bool); 11548 } 11549 } 11550 } 11551 11552 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 11553 SourceLocation CC) { 11554 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 11555 E->getExprLoc())) 11556 return; 11557 11558 // Don't warn on functions which have return type nullptr_t. 11559 if (isa<CallExpr>(E)) 11560 return; 11561 11562 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 11563 const Expr::NullPointerConstantKind NullKind = 11564 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 11565 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 11566 return; 11567 11568 // Return if target type is a safe conversion. 11569 if (T->isAnyPointerType() || T->isBlockPointerType() || 11570 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 11571 return; 11572 11573 SourceLocation Loc = E->getSourceRange().getBegin(); 11574 11575 // Venture through the macro stacks to get to the source of macro arguments. 11576 // The new location is a better location than the complete location that was 11577 // passed in. 11578 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 11579 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 11580 11581 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
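  // e.g. for 'int i = NULL;' the diagnostic should point at the 'NULL' the
  // user wrote, not at the '__null' it expands to.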
11582 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 11583 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 11584 Loc, S.SourceMgr, S.getLangOpts()); 11585 if (MacroName == "NULL") 11586 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 11587 } 11588 11589 // Only warn if the null and context location are in the same macro expansion. 11590 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 11591 return; 11592 11593 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 11594 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 11595 << FixItHint::CreateReplacement(Loc, 11596 S.getFixItZeroLiteralForType(T, Loc)); 11597 } 11598 11599 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11600 ObjCArrayLiteral *ArrayLiteral); 11601 11602 static void 11603 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11604 ObjCDictionaryLiteral *DictionaryLiteral); 11605 11606 /// Check a single element within a collection literal against the 11607 /// target element type. 11608 static void checkObjCCollectionLiteralElement(Sema &S, 11609 QualType TargetElementType, 11610 Expr *Element, 11611 unsigned ElementKind) { 11612 // Skip a bitcast to 'id' or qualified 'id'. 11613 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 11614 if (ICE->getCastKind() == CK_BitCast && 11615 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 11616 Element = ICE->getSubExpr(); 11617 } 11618 11619 QualType ElementType = Element->getType(); 11620 ExprResult ElementResult(Element); 11621 if (ElementType->getAs<ObjCObjectPointerType>() && 11622 S.CheckSingleAssignmentConstraints(TargetElementType, 11623 ElementResult, 11624 false, false) 11625 != Sema::Compatible) { 11626 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 11627 << ElementType << ElementKind << TargetElementType 11628 << Element->getSourceRange(); 11629 } 11630 11631 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 11632 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 11633 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 11634 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 11635 } 11636 11637 /// Check an Objective-C array literal being converted to the given 11638 /// target type. 11639 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 11640 ObjCArrayLiteral *ArrayLiteral) { 11641 if (!S.NSArrayDecl) 11642 return; 11643 11644 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11645 if (!TargetObjCPtr) 11646 return; 11647 11648 if (TargetObjCPtr->isUnspecialized() || 11649 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11650 != S.NSArrayDecl->getCanonicalDecl()) 11651 return; 11652 11653 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11654 if (TypeArgs.size() != 1) 11655 return; 11656 11657 QualType TargetElementType = TypeArgs[0]; 11658 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 11659 checkObjCCollectionLiteralElement(S, TargetElementType, 11660 ArrayLiteral->getElement(I), 11661 0); 11662 } 11663 } 11664 11665 /// Check an Objective-C dictionary literal being converted to the given 11666 /// target type. 
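///
/// For example, for a target of type 'NSDictionary<NSString *, NSNumber *> *',
/// each key is checked against 'NSString *' and each value against
/// 'NSNumber *'.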
11667 static void 11668 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 11669 ObjCDictionaryLiteral *DictionaryLiteral) { 11670 if (!S.NSDictionaryDecl) 11671 return; 11672 11673 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 11674 if (!TargetObjCPtr) 11675 return; 11676 11677 if (TargetObjCPtr->isUnspecialized() || 11678 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 11679 != S.NSDictionaryDecl->getCanonicalDecl()) 11680 return; 11681 11682 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 11683 if (TypeArgs.size() != 2) 11684 return; 11685 11686 QualType TargetKeyType = TypeArgs[0]; 11687 QualType TargetObjectType = TypeArgs[1]; 11688 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 11689 auto Element = DictionaryLiteral->getKeyValueElement(I); 11690 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 11691 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 11692 } 11693 } 11694 11695 // Helper function to filter out cases for constant width constant conversion. 11696 // Don't warn on char array initialization or for non-decimal values. 11697 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 11698 SourceLocation CC) { 11699 // If initializing from a constant, and the constant starts with '0', 11700 // then it is a binary, octal, or hexadecimal. Allow these constants 11701 // to fill all the bits, even if there is a sign change. 11702 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 11703 const char FirstLiteralCharacter = 11704 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 11705 if (FirstLiteralCharacter == '0') 11706 return false; 11707 } 11708 11709 // If the CC location points to a '{', and the type is char, then assume 11710 // assume it is an array initialization. 11711 if (CC.isValid() && T->isCharType()) { 11712 const char FirstContextCharacter = 11713 S.getSourceManager().getCharacterData(CC)[0]; 11714 if (FirstContextCharacter == '{') 11715 return false; 11716 } 11717 11718 return true; 11719 } 11720 11721 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 11722 const auto *IL = dyn_cast<IntegerLiteral>(E); 11723 if (!IL) { 11724 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 11725 if (UO->getOpcode() == UO_Minus) 11726 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 11727 } 11728 } 11729 11730 return IL; 11731 } 11732 11733 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 11734 E = E->IgnoreParenImpCasts(); 11735 SourceLocation ExprLoc = E->getExprLoc(); 11736 11737 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11738 BinaryOperator::Opcode Opc = BO->getOpcode(); 11739 Expr::EvalResult Result; 11740 // Do not diagnose unsigned shifts. 
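    // (Shifts of unsigned values, e.g. '1u << n', are common flag
    // manipulations; only signed shifts reach the in-bool-context warning
    // below.)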
    if (Opc == BO_Shl) {
      const auto *LHS = getIntegerLiteral(BO->getLHS());
      const auto *RHS = getIntegerLiteral(BO->getRHS());
      if (LHS && LHS->getValue() == 0)
        S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
      else if (!E->isValueDependent() && LHS && RHS &&
               RHS->getValue().isNonNegative() &&
               E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
        S.Diag(ExprLoc, diag::warn_left_shift_always)
            << (Result.Val.getInt() != 0);
      else if (E->getType()->isSignedIntegerType())
        S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
    }
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
    const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
    if (!LHS || !RHS)
      return;
    if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
        (RHS->getValue() == 0 || RHS->getValue() == 1))
      // Do not diagnose common idioms.
      return;
    if (LHS->getValue() != 0 && RHS->getValue() != 0)
      S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
  }
}

static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC,
                                    bool *ICContext = nullptr,
                                    bool IsListInit = false) {
  if (E->isTypeDependent() || E->isValueDependent()) return;

  const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
  if (Source == Target) return;
  if (Target->isDependentType()) return;

  // If the conversion context location is invalid don't complain. We also
  // don't want to emit a warning if the issue occurs from the expansion of
  // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
  // delay this check as long as possible. Once we detect we are in that
  // scenario, we just return.
  if (CC.isInvalid())
    return;

  if (Source->isAtomicType())
    S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);

  // Diagnose implicit casts to bool.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
    if (isa<StringLiteral>(E))
      // Warn on string literal to bool. Checks for string literals in logical
      // and expressions, for instance, assert(0 && "error here"), are
      // prevented by a check in AnalyzeImplicitConversions().
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_string_literal_to_bool);
    if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
        isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
      // This covers the literal expressions that evaluate to Objective-C
      // objects.
      return DiagnoseImpCast(S, E, T, CC,
                             diag::warn_impcast_objective_c_literal_to_bool);
    }
    if (Source->isPointerType() || Source->canDecayToPointerType()) {
      // Warn on pointer to bool conversion that is always true.
      S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
                                     SourceRange(CC));
    }
  }

  // If we're converting a constant to an ObjC BOOL on a platform where BOOL
  // is a typedef for signed char (macOS), then that constant value has to be 1
  // or 0.
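  // e.g. 'BOOL b = 2;' is suspicious because only 0 and 1 compare equal to NO
  // and YES respectively.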
11817 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 11818 Expr::EvalResult Result; 11819 if (E->EvaluateAsInt(Result, S.getASTContext(), 11820 Expr::SE_AllowSideEffects)) { 11821 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 11822 adornObjCBoolConversionDiagWithTernaryFixit( 11823 S, E, 11824 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 11825 << Result.Val.getInt().toString(10)); 11826 } 11827 return; 11828 } 11829 } 11830 11831 // Check implicit casts from Objective-C collection literals to specialized 11832 // collection types, e.g., NSArray<NSString *> *. 11833 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 11834 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 11835 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 11836 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 11837 11838 // Strip vector types. 11839 if (isa<VectorType>(Source)) { 11840 if (!isa<VectorType>(Target)) { 11841 if (S.SourceMgr.isInSystemMacro(CC)) 11842 return; 11843 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 11844 } 11845 11846 // If the vector cast is cast between two vectors of the same size, it is 11847 // a bitcast, not a conversion. 11848 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 11849 return; 11850 11851 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 11852 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 11853 } 11854 if (auto VecTy = dyn_cast<VectorType>(Target)) 11855 Target = VecTy->getElementType().getTypePtr(); 11856 11857 // Strip complex types. 11858 if (isa<ComplexType>(Source)) { 11859 if (!isa<ComplexType>(Target)) { 11860 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 11861 return; 11862 11863 return DiagnoseImpCast(S, E, T, CC, 11864 S.getLangOpts().CPlusPlus 11865 ? diag::err_impcast_complex_scalar 11866 : diag::warn_impcast_complex_scalar); 11867 } 11868 11869 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 11870 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 11871 } 11872 11873 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 11874 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 11875 11876 // If the source is floating point... 11877 if (SourceBT && SourceBT->isFloatingPoint()) { 11878 // ...and the target is floating point... 11879 if (TargetBT && TargetBT->isFloatingPoint()) { 11880 // ...then warn if we're dropping FP rank. 11881 11882 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 11883 QualType(SourceBT, 0), QualType(TargetBT, 0)); 11884 if (Order > 0) { 11885 // Don't warn about float constants that are precisely 11886 // representable in the target type. 11887 Expr::EvalResult result; 11888 if (E->EvaluateAsRValue(result, S.Context)) { 11889 // Value might be a float, a float vector, or a float complex. 11890 if (IsSameFloatAfterCast(result.Val, 11891 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 11892 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 11893 return; 11894 } 11895 11896 if (S.SourceMgr.isInSystemMacro(CC)) 11897 return; 11898 11899 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 11900 } 11901 // ... 
or possibly if we're increasing rank, too 11902 else if (Order < 0) { 11903 if (S.SourceMgr.isInSystemMacro(CC)) 11904 return; 11905 11906 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 11907 } 11908 return; 11909 } 11910 11911 // If the target is integral, always warn. 11912 if (TargetBT && TargetBT->isInteger()) { 11913 if (S.SourceMgr.isInSystemMacro(CC)) 11914 return; 11915 11916 DiagnoseFloatingImpCast(S, E, T, CC); 11917 } 11918 11919 // Detect the case where a call result is converted from floating-point to 11920 // to bool, and the final argument to the call is converted from bool, to 11921 // discover this typo: 11922 // 11923 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 11924 // 11925 // FIXME: This is an incredibly special case; is there some more general 11926 // way to detect this class of misplaced-parentheses bug? 11927 if (Target->isBooleanType() && isa<CallExpr>(E)) { 11928 // Check last argument of function call to see if it is an 11929 // implicit cast from a type matching the type the result 11930 // is being cast to. 11931 CallExpr *CEx = cast<CallExpr>(E); 11932 if (unsigned NumArgs = CEx->getNumArgs()) { 11933 Expr *LastA = CEx->getArg(NumArgs - 1); 11934 Expr *InnerE = LastA->IgnoreParenImpCasts(); 11935 if (isa<ImplicitCastExpr>(LastA) && 11936 InnerE->getType()->isBooleanType()) { 11937 // Warn on this floating-point to bool conversion 11938 DiagnoseImpCast(S, E, T, CC, 11939 diag::warn_impcast_floating_point_to_bool); 11940 } 11941 } 11942 } 11943 return; 11944 } 11945 11946 // Valid casts involving fixed point types should be accounted for here. 11947 if (Source->isFixedPointType()) { 11948 if (Target->isUnsaturatedFixedPointType()) { 11949 Expr::EvalResult Result; 11950 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 11951 S.isConstantEvaluated())) { 11952 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 11953 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 11954 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 11955 if (Value > MaxVal || Value < MinVal) { 11956 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11957 S.PDiag(diag::warn_impcast_fixed_point_range) 11958 << Value.toString() << T 11959 << E->getSourceRange() 11960 << clang::SourceRange(CC)); 11961 return; 11962 } 11963 } 11964 } else if (Target->isIntegerType()) { 11965 Expr::EvalResult Result; 11966 if (!S.isConstantEvaluated() && 11967 E->EvaluateAsFixedPoint(Result, S.Context, 11968 Expr::SE_AllowSideEffects)) { 11969 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 11970 11971 bool Overflowed; 11972 llvm::APSInt IntResult = FXResult.convertToInt( 11973 S.Context.getIntWidth(T), 11974 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 11975 11976 if (Overflowed) { 11977 S.DiagRuntimeBehavior(E->getExprLoc(), E, 11978 S.PDiag(diag::warn_impcast_fixed_point_range) 11979 << FXResult.toString() << T 11980 << E->getSourceRange() 11981 << clang::SourceRange(CC)); 11982 return; 11983 } 11984 } 11985 } 11986 } else if (Target->isUnsaturatedFixedPointType()) { 11987 if (Source->isIntegerType()) { 11988 Expr::EvalResult Result; 11989 if (!S.isConstantEvaluated() && 11990 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 11991 llvm::APSInt Value = Result.Val.getInt(); 11992 11993 bool Overflowed; 11994 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 11995 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 11996 11997 if (Overflowed) { 11998 
S.DiagRuntimeBehavior(E->getExprLoc(), E, 11999 S.PDiag(diag::warn_impcast_fixed_point_range) 12000 << Value.toString(/*Radix=*/10) << T 12001 << E->getSourceRange() 12002 << clang::SourceRange(CC)); 12003 return; 12004 } 12005 } 12006 } 12007 } 12008 12009 // If we are casting an integer type to a floating point type without 12010 // initialization-list syntax, we might lose accuracy if the floating 12011 // point type has a narrower significand than the integer type. 12012 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 12013 TargetBT->isFloatingType() && !IsListInit) { 12014 // Determine the number of precision bits in the source integer type. 12015 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 12016 /*Approximate*/ true); 12017 unsigned int SourcePrecision = SourceRange.Width; 12018 12019 // Determine the number of precision bits in the 12020 // target floating point type. 12021 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 12022 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12023 12024 if (SourcePrecision > 0 && TargetPrecision > 0 && 12025 SourcePrecision > TargetPrecision) { 12026 12027 if (Optional<llvm::APSInt> SourceInt = 12028 E->getIntegerConstantExpr(S.Context)) { 12029 // If the source integer is a constant, convert it to the target 12030 // floating point type. Issue a warning if the value changes 12031 // during the whole conversion. 12032 llvm::APFloat TargetFloatValue( 12033 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12034 llvm::APFloat::opStatus ConversionStatus = 12035 TargetFloatValue.convertFromAPInt( 12036 *SourceInt, SourceBT->isSignedInteger(), 12037 llvm::APFloat::rmNearestTiesToEven); 12038 12039 if (ConversionStatus != llvm::APFloat::opOK) { 12040 std::string PrettySourceValue = SourceInt->toString(10); 12041 SmallString<32> PrettyTargetValue; 12042 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 12043 12044 S.DiagRuntimeBehavior( 12045 E->getExprLoc(), E, 12046 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 12047 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12048 << E->getSourceRange() << clang::SourceRange(CC)); 12049 } 12050 } else { 12051 // Otherwise, the implicit conversion may lose precision. 12052 DiagnoseImpCast(S, E, T, CC, 12053 diag::warn_impcast_integer_float_precision); 12054 } 12055 } 12056 } 12057 12058 DiagnoseNullConversion(S, E, T, CC); 12059 12060 S.DiscardMisalignedMemberAddress(Target, E); 12061 12062 if (Target->isBooleanType()) 12063 DiagnoseIntInBoolContext(S, E); 12064 12065 if (!Source->isIntegerType() || !Target->isIntegerType()) 12066 return; 12067 12068 // TODO: remove this early return once the false positives for constant->bool 12069 // in templates, macros, etc, are reduced or removed. 
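  // (e.g. a macro that expands to an integer constant and is then assigned to
  // a bool would otherwise warn at every expansion.)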
12070 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 12071 return; 12072 12073 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 12074 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 12075 return adornObjCBoolConversionDiagWithTernaryFixit( 12076 S, E, 12077 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 12078 << E->getType()); 12079 } 12080 12081 IntRange SourceTypeRange = 12082 IntRange::forTargetOfCanonicalType(S.Context, Source); 12083 IntRange LikelySourceRange = 12084 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 12085 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 12086 12087 if (LikelySourceRange.Width > TargetRange.Width) { 12088 // If the source is a constant, use a default-on diagnostic. 12089 // TODO: this should happen for bitfield stores, too. 12090 Expr::EvalResult Result; 12091 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 12092 S.isConstantEvaluated())) { 12093 llvm::APSInt Value(32); 12094 Value = Result.Val.getInt(); 12095 12096 if (S.SourceMgr.isInSystemMacro(CC)) 12097 return; 12098 12099 std::string PrettySourceValue = Value.toString(10); 12100 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12101 12102 S.DiagRuntimeBehavior( 12103 E->getExprLoc(), E, 12104 S.PDiag(diag::warn_impcast_integer_precision_constant) 12105 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12106 << E->getSourceRange() << SourceRange(CC)); 12107 return; 12108 } 12109 12110 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 12111 if (S.SourceMgr.isInSystemMacro(CC)) 12112 return; 12113 12114 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 12115 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 12116 /* pruneControlFlow */ true); 12117 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 12118 } 12119 12120 if (TargetRange.Width > SourceTypeRange.Width) { 12121 if (auto *UO = dyn_cast<UnaryOperator>(E)) 12122 if (UO->getOpcode() == UO_Minus) 12123 if (Source->isUnsignedIntegerType()) { 12124 if (Target->isUnsignedIntegerType()) 12125 return DiagnoseImpCast(S, E, T, CC, 12126 diag::warn_impcast_high_order_zero_bits); 12127 if (Target->isSignedIntegerType()) 12128 return DiagnoseImpCast(S, E, T, CC, 12129 diag::warn_impcast_nonnegative_result); 12130 } 12131 } 12132 12133 if (TargetRange.Width == LikelySourceRange.Width && 12134 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 12135 Source->isSignedIntegerType()) { 12136 // Warn when doing a signed to signed conversion, warn if the positive 12137 // source value is exactly the width of the target type, which will 12138 // cause a negative value to be stored. 12139 12140 Expr::EvalResult Result; 12141 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 12142 !S.SourceMgr.isInSystemMacro(CC)) { 12143 llvm::APSInt Value = Result.Val.getInt(); 12144 if (isSameWidthConstantConversion(S, E, T, CC)) { 12145 std::string PrettySourceValue = Value.toString(10); 12146 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12147 12148 S.DiagRuntimeBehavior( 12149 E->getExprLoc(), E, 12150 S.PDiag(diag::warn_impcast_integer_precision_constant) 12151 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12152 << E->getSourceRange() << SourceRange(CC)); 12153 return; 12154 } 12155 } 12156 12157 // Fall through for non-constants to give a sign conversion warning. 
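// (The signedness check below still fires for the non-constant case.)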
12158 } 12159 12160 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 12161 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 12162 LikelySourceRange.Width == TargetRange.Width)) { 12163 if (S.SourceMgr.isInSystemMacro(CC)) 12164 return; 12165 12166 unsigned DiagID = diag::warn_impcast_integer_sign; 12167 12168 // Traditionally, gcc has warned about this under -Wsign-compare. 12169 // We also want to warn about it in -Wconversion. 12170 // So if -Wconversion is off, use a completely identical diagnostic 12171 // in the sign-compare group. 12172 // The conditional-checking code will 12173 if (ICContext) { 12174 DiagID = diag::warn_impcast_integer_sign_conditional; 12175 *ICContext = true; 12176 } 12177 12178 return DiagnoseImpCast(S, E, T, CC, DiagID); 12179 } 12180 12181 // Diagnose conversions between different enumeration types. 12182 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 12183 // type, to give us better diagnostics. 12184 QualType SourceType = E->getType(); 12185 if (!S.getLangOpts().CPlusPlus) { 12186 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12187 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 12188 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 12189 SourceType = S.Context.getTypeDeclType(Enum); 12190 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 12191 } 12192 } 12193 12194 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 12195 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 12196 if (SourceEnum->getDecl()->hasNameForLinkage() && 12197 TargetEnum->getDecl()->hasNameForLinkage() && 12198 SourceEnum != TargetEnum) { 12199 if (S.SourceMgr.isInSystemMacro(CC)) 12200 return; 12201 12202 return DiagnoseImpCast(S, E, SourceType, T, CC, 12203 diag::warn_impcast_different_enum_types); 12204 } 12205 } 12206 12207 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 12208 SourceLocation CC, QualType T); 12209 12210 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 12211 SourceLocation CC, bool &ICContext) { 12212 E = E->IgnoreParenImpCasts(); 12213 12214 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 12215 return CheckConditionalOperator(S, CO, CC, T); 12216 12217 AnalyzeImplicitConversions(S, E, CC); 12218 if (E->getType() != T) 12219 return CheckImplicitConversion(S, E, T, CC, &ICContext); 12220 } 12221 12222 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 12223 SourceLocation CC, QualType T) { 12224 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 12225 12226 Expr *TrueExpr = E->getTrueExpr(); 12227 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 12228 TrueExpr = BCO->getCommon(); 12229 12230 bool Suspicious = false; 12231 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 12232 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 12233 12234 if (T->isBooleanType()) 12235 DiagnoseIntInBoolContext(S, E); 12236 12237 // If -Wconversion would have warned about either of the candidates 12238 // for a signedness conversion to the context type... 12239 if (!Suspicious) return; 12240 12241 // ...but it's currently ignored... 12242 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 12243 return; 12244 12245 // ...then check whether it would have warned about either of the 12246 // candidates for a signedness conversion to the condition type. 
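// That is, check the operands against the conditional expression's own type as
// well, since a signedness change there may still deserve a diagnostic.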
12247 if (E->getType() == T) return; 12248 12249 Suspicious = false; 12250 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 12251 E->getType(), CC, &Suspicious); 12252 if (!Suspicious) 12253 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 12254 E->getType(), CC, &Suspicious); 12255 } 12256 12257 /// Check conversion of given expression to boolean. 12258 /// Input argument E is a logical expression. 12259 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 12260 if (S.getLangOpts().Bool) 12261 return; 12262 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 12263 return; 12264 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 12265 } 12266 12267 namespace { 12268 struct AnalyzeImplicitConversionsWorkItem { 12269 Expr *E; 12270 SourceLocation CC; 12271 bool IsListInit; 12272 }; 12273 } 12274 12275 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 12276 /// that should be visited are added to WorkList. 12277 static void AnalyzeImplicitConversions( 12278 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 12279 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 12280 Expr *OrigE = Item.E; 12281 SourceLocation CC = Item.CC; 12282 12283 QualType T = OrigE->getType(); 12284 Expr *E = OrigE->IgnoreParenImpCasts(); 12285 12286 // Propagate whether we are in a C++ list initialization expression. 12287 // If so, we do not issue warnings for implicit int-float conversion 12288 // precision loss, because C++11 narrowing already handles it. 12289 bool IsListInit = Item.IsListInit || 12290 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 12291 12292 if (E->isTypeDependent() || E->isValueDependent()) 12293 return; 12294 12295 Expr *SourceExpr = E; 12296 // Examine, but don't traverse into the source expression of an 12297 // OpaqueValueExpr, since it may have multiple parents and we don't want to 12298 // emit duplicate diagnostics. Its fine to examine the form or attempt to 12299 // evaluate it in the context of checking the specific conversion to T though. 12300 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12301 if (auto *Src = OVE->getSourceExpr()) 12302 SourceExpr = Src; 12303 12304 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 12305 if (UO->getOpcode() == UO_Not && 12306 UO->getSubExpr()->isKnownToHaveBooleanValue()) 12307 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 12308 << OrigE->getSourceRange() << T->isBooleanType() 12309 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 12310 12311 // For conditional operators, we analyze the arguments as if they 12312 // were being fed directly into the output. 12313 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 12314 CheckConditionalOperator(S, CO, CC, T); 12315 return; 12316 } 12317 12318 // Check implicit argument conversions for function calls. 12319 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 12320 CheckImplicitArgumentConversions(S, Call, CC); 12321 12322 // Go ahead and check any implicit conversions we might have skipped. 12323 // The non-canonical typecheck is just an optimization; 12324 // CheckImplicitConversion will filter out dead implicit conversions. 12325 if (SourceExpr->getType() != T) 12326 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 12327 12328 // Now continue drilling into this expression. 
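// Subexpressions are pushed onto WorkList rather than visited recursively so
// that deeply nested expressions do not overflow the stack.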
12329 12330 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 12331 // The bound subexpressions in a PseudoObjectExpr are not reachable 12332 // as transitive children. 12333 // FIXME: Use a more uniform representation for this. 12334 for (auto *SE : POE->semantics()) 12335 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 12336 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 12337 } 12338 12339 // Skip past explicit casts. 12340 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 12341 E = CE->getSubExpr()->IgnoreParenImpCasts(); 12342 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 12343 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12344 WorkList.push_back({E, CC, IsListInit}); 12345 return; 12346 } 12347 12348 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12349 // Do a somewhat different check with comparison operators. 12350 if (BO->isComparisonOp()) 12351 return AnalyzeComparison(S, BO); 12352 12353 // And with simple assignments. 12354 if (BO->getOpcode() == BO_Assign) 12355 return AnalyzeAssignment(S, BO); 12356 // And with compound assignments. 12357 if (BO->isAssignmentOp()) 12358 return AnalyzeCompoundAssignment(S, BO); 12359 } 12360 12361 // These break the otherwise-useful invariant below. Fortunately, 12362 // we don't really need to recurse into them, because any internal 12363 // expressions should have been analyzed already when they were 12364 // built into statements. 12365 if (isa<StmtExpr>(E)) return; 12366 12367 // Don't descend into unevaluated contexts. 12368 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 12369 12370 // Now just recurse over the expression's children. 12371 CC = E->getExprLoc(); 12372 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 12373 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 12374 for (Stmt *SubStmt : E->children()) { 12375 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 12376 if (!ChildExpr) 12377 continue; 12378 12379 if (IsLogicalAndOperator && 12380 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 12381 // Ignore checking string literals that are in logical and operators. 12382 // This is a common pattern for asserts. 12383 continue; 12384 WorkList.push_back({ChildExpr, CC, IsListInit}); 12385 } 12386 12387 if (BO && BO->isLogicalOp()) { 12388 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 12389 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12390 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12391 12392 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 12393 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 12394 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 12395 } 12396 12397 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 12398 if (U->getOpcode() == UO_LNot) { 12399 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 12400 } else if (U->getOpcode() != UO_AddrOf) { 12401 if (U->getSubExpr()->getType()->isAtomicType()) 12402 S.Diag(U->getSubExpr()->getBeginLoc(), 12403 diag::warn_atomic_implicit_seq_cst); 12404 } 12405 } 12406 } 12407 12408 /// AnalyzeImplicitConversions - Find and report any interesting 12409 /// implicit conversions in the given expression. There are a couple 12410 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
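/// A signedness change, for example, may be reported under either group
/// depending on the context in which it appears.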
12411 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
12412 bool IsListInit/*= false*/) {
12413 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
12414 WorkList.push_back({OrigE, CC, IsListInit});
12415 while (!WorkList.empty())
12416 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
12417 }
12418
12419 /// Check that E has an integer type and diagnose any implicit conversion to IntT.
12420 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
12421 // Taking into account implicit conversions,
12422 // allow any integer.
12423 if (!E->getType()->isIntegerType()) {
12424 S.Diag(E->getBeginLoc(),
12425 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
12426 return true;
12427 }
12428 // Potentially emit standard warnings for implicit conversions if enabled
12429 // using -Wconversion.
12430 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
12431 return false;
12432 }
12433
12434 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
12435 // Returns true when emitting a warning about taking the address of a reference.
12436 static bool CheckForReference(Sema &SemaRef, const Expr *E,
12437 const PartialDiagnostic &PD) {
12438 E = E->IgnoreParenImpCasts();
12439
12440 const FunctionDecl *FD = nullptr;
12441
12442 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
12443 if (!DRE->getDecl()->getType()->isReferenceType())
12444 return false;
12445 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
12446 if (!M->getMemberDecl()->getType()->isReferenceType())
12447 return false;
12448 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
12449 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
12450 return false;
12451 FD = Call->getDirectCallee();
12452 } else {
12453 return false;
12454 }
12455
12456 SemaRef.Diag(E->getExprLoc(), PD);
12457
12458 // If possible, point to location of function.
12459 if (FD) {
12460 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
12461 }
12462
12463 return true;
12464 }
12465
12466 // Returns true if the SourceLocation is expanded from any macro body.
12467 // Returns false if the SourceLocation is invalid, is not in a macro
12468 // expansion, or is expanded from a top-level macro argument.
12469 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
12470 if (Loc.isInvalid())
12471 return false;
12472
12473 while (Loc.isMacroID()) {
12474 if (SM.isMacroBodyExpansion(Loc))
12475 return true;
12476 Loc = SM.getImmediateMacroCallerLoc(Loc);
12477 }
12478
12479 return false;
12480 }
12481
12482 /// Diagnose pointers that are always non-null.
12483 /// \param E the expression containing the pointer
12484 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
12485 /// compared to a null pointer
12486 /// \param IsEqual True when the comparison is equal to a null pointer
12487 /// \param Range Extra SourceRange to highlight in the diagnostic
12488 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
12489 Expr::NullPointerConstantKind NullKind,
12490 bool IsEqual, SourceRange Range) {
12491 if (!E)
12492 return;
12493
12494 // Don't warn inside macros.
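// A null check or bool conversion spelled inside a macro body (e.g. an
// assert-style macro comparing an address against null) is usually intentional,
// so skip it.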
12495 if (E->getExprLoc().isMacroID()) { 12496 const SourceManager &SM = getSourceManager(); 12497 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 12498 IsInAnyMacroBody(SM, Range.getBegin())) 12499 return; 12500 } 12501 E = E->IgnoreImpCasts(); 12502 12503 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 12504 12505 if (isa<CXXThisExpr>(E)) { 12506 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 12507 : diag::warn_this_bool_conversion; 12508 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 12509 return; 12510 } 12511 12512 bool IsAddressOf = false; 12513 12514 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12515 if (UO->getOpcode() != UO_AddrOf) 12516 return; 12517 IsAddressOf = true; 12518 E = UO->getSubExpr(); 12519 } 12520 12521 if (IsAddressOf) { 12522 unsigned DiagID = IsCompare 12523 ? diag::warn_address_of_reference_null_compare 12524 : diag::warn_address_of_reference_bool_conversion; 12525 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 12526 << IsEqual; 12527 if (CheckForReference(*this, E, PD)) { 12528 return; 12529 } 12530 } 12531 12532 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 12533 bool IsParam = isa<NonNullAttr>(NonnullAttr); 12534 std::string Str; 12535 llvm::raw_string_ostream S(Str); 12536 E->printPretty(S, nullptr, getPrintingPolicy()); 12537 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 12538 : diag::warn_cast_nonnull_to_bool; 12539 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 12540 << E->getSourceRange() << Range << IsEqual; 12541 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 12542 }; 12543 12544 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 12545 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 12546 if (auto *Callee = Call->getDirectCallee()) { 12547 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 12548 ComplainAboutNonnullParamOrCall(A); 12549 return; 12550 } 12551 } 12552 } 12553 12554 // Expect to find a single Decl. Skip anything more complicated. 12555 ValueDecl *D = nullptr; 12556 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 12557 D = R->getDecl(); 12558 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 12559 D = M->getMemberDecl(); 12560 } 12561 12562 // Weak Decls can be null. 12563 if (!D || D->isWeak()) 12564 return; 12565 12566 // Check for parameter decl with nonnull attribute 12567 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 12568 if (getCurFunction() && 12569 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 12570 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 12571 ComplainAboutNonnullParamOrCall(A); 12572 return; 12573 } 12574 12575 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 12576 // Skip function template not specialized yet. 
12577 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
12578 return;
12579 auto ParamIter = llvm::find(FD->parameters(), PV);
12580 assert(ParamIter != FD->param_end());
12581 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
12582
12583 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
12584 if (!NonNull->args_size()) {
12585 ComplainAboutNonnullParamOrCall(NonNull);
12586 return;
12587 }
12588
12589 for (const ParamIdx &ArgNo : NonNull->args()) {
12590 if (ArgNo.getASTIndex() == ParamNo) {
12591 ComplainAboutNonnullParamOrCall(NonNull);
12592 return;
12593 }
12594 }
12595 }
12596 }
12597 }
12598 }
12599
12600 QualType T = D->getType();
12601 const bool IsArray = T->isArrayType();
12602 const bool IsFunction = T->isFunctionType();
12603
12604 // Address of function is used to silence the function warning.
12605 if (IsAddressOf && IsFunction) {
12606 return;
12607 }
12608
12609 // Found nothing.
12610 if (!IsAddressOf && !IsFunction && !IsArray)
12611 return;
12612
12613 // Pretty print the expression for the diagnostic.
12614 std::string Str;
12615 llvm::raw_string_ostream S(Str);
12616 E->printPretty(S, nullptr, getPrintingPolicy());
12617
12618 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
12619 : diag::warn_impcast_pointer_to_bool;
12620 enum {
12621 AddressOf,
12622 FunctionPointer,
12623 ArrayPointer
12624 } DiagType;
12625 if (IsAddressOf)
12626 DiagType = AddressOf;
12627 else if (IsFunction)
12628 DiagType = FunctionPointer;
12629 else if (IsArray)
12630 DiagType = ArrayPointer;
12631 else
12632 llvm_unreachable("Could not determine diagnostic.");
12633 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
12634 << Range << IsEqual;
12635
12636 if (!IsFunction)
12637 return;
12638
12639 // Suggest '&' to silence the function warning.
12640 Diag(E->getExprLoc(), diag::note_function_warning_silence)
12641 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
12642
12643 // Check to see if '()' fixit should be emitted.
12644 QualType ReturnType;
12645 UnresolvedSet<4> NonTemplateOverloads;
12646 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
12647 if (ReturnType.isNull())
12648 return;
12649
12650 if (IsCompare) {
12651 // There are two cases here. If there is a null constant, only suggest the
12652 // fixit for a pointer return type. If the null is 0, then suggest it if the
12653 // return type is a pointer or an integer type.
12654 if (!ReturnType->isPointerType()) {
12655 if (NullKind == Expr::NPCK_ZeroExpression ||
12656 NullKind == Expr::NPCK_ZeroLiteral) {
12657 if (!ReturnType->isIntegerType())
12658 return;
12659 } else {
12660 return;
12661 }
12662 }
12663 } else { // !IsCompare
12664 // For function to bool, only suggest if the function pointer has bool
12665 // return type.
12666 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
12667 return;
12668 }
12669 Diag(E->getExprLoc(), diag::note_function_to_function_call)
12670 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
12671 }
12672
12673 /// Diagnoses "dangerous" implicit conversions within the given
12674 /// expression (which is a full expression). Implements -Wconversion
12675 /// and -Wsign-compare.
12676 ///
12677 /// \param CC the "context" location of the implicit conversion, i.e.
12678 /// the location of the syntactic entity requiring the implicit
12679 /// conversion
12680 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
12681 // Don't diagnose in unevaluated contexts.
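// (e.g. the operand of sizeof or decltype, where no conversion actually
// happens at run time)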
12682 if (isUnevaluatedContext()) 12683 return; 12684 12685 // Don't diagnose for value- or type-dependent expressions. 12686 if (E->isTypeDependent() || E->isValueDependent()) 12687 return; 12688 12689 // Check for array bounds violations in cases where the check isn't triggered 12690 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 12691 // ArraySubscriptExpr is on the RHS of a variable initialization. 12692 CheckArrayAccess(E); 12693 12694 // This is not the right CC for (e.g.) a variable initialization. 12695 AnalyzeImplicitConversions(*this, E, CC); 12696 } 12697 12698 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 12699 /// Input argument E is a logical expression. 12700 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 12701 ::CheckBoolLikeConversion(*this, E, CC); 12702 } 12703 12704 /// Diagnose when expression is an integer constant expression and its evaluation 12705 /// results in integer overflow 12706 void Sema::CheckForIntOverflow (Expr *E) { 12707 // Use a work list to deal with nested struct initializers. 12708 SmallVector<Expr *, 2> Exprs(1, E); 12709 12710 do { 12711 Expr *OriginalE = Exprs.pop_back_val(); 12712 Expr *E = OriginalE->IgnoreParenCasts(); 12713 12714 if (isa<BinaryOperator>(E)) { 12715 E->EvaluateForOverflow(Context); 12716 continue; 12717 } 12718 12719 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 12720 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 12721 else if (isa<ObjCBoxedExpr>(OriginalE)) 12722 E->EvaluateForOverflow(Context); 12723 else if (auto Call = dyn_cast<CallExpr>(E)) 12724 Exprs.append(Call->arg_begin(), Call->arg_end()); 12725 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 12726 Exprs.append(Message->arg_begin(), Message->arg_end()); 12727 } while (!Exprs.empty()); 12728 } 12729 12730 namespace { 12731 12732 /// Visitor for expressions which looks for unsequenced operations on the 12733 /// same object. 12734 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 12735 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 12736 12737 /// A tree of sequenced regions within an expression. Two regions are 12738 /// unsequenced if one is an ancestor or a descendent of the other. When we 12739 /// finish processing an expression with sequencing, such as a comma 12740 /// expression, we fold its tree nodes into its parent, since they are 12741 /// unsequenced with respect to nodes we will visit later. 12742 class SequenceTree { 12743 struct Value { 12744 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 12745 unsigned Parent : 31; 12746 unsigned Merged : 1; 12747 }; 12748 SmallVector<Value, 8> Values; 12749 12750 public: 12751 /// A region within an expression which may be sequenced with respect 12752 /// to some other region. 12753 class Seq { 12754 friend class SequenceTree; 12755 12756 unsigned Index; 12757 12758 explicit Seq(unsigned N) : Index(N) {} 12759 12760 public: 12761 Seq() : Index(0) {} 12762 }; 12763 12764 SequenceTree() { Values.push_back(Value(0)); } 12765 Seq root() const { return Seq(0); } 12766 12767 /// Create a new sequence of operations, which is an unsequenced 12768 /// subset of \p Parent. This sequence of operations is sequenced with 12769 /// respect to other children of \p Parent. 12770 Seq allocate(Seq Parent) { 12771 Values.push_back(Value(Parent.Index)); 12772 return Seq(Values.size() - 1); 12773 } 12774 12775 /// Merge a sequence of operations into its parent. 
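/// Afterwards the merged region is treated as its parent for sequencing
/// queries, so operations recorded in it become unsequenced with respect to
/// operations added to the parent later.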
12776 void merge(Seq S) { 12777 Values[S.Index].Merged = true; 12778 } 12779 12780 /// Determine whether two operations are unsequenced. This operation 12781 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 12782 /// should have been merged into its parent as appropriate. 12783 bool isUnsequenced(Seq Cur, Seq Old) { 12784 unsigned C = representative(Cur.Index); 12785 unsigned Target = representative(Old.Index); 12786 while (C >= Target) { 12787 if (C == Target) 12788 return true; 12789 C = Values[C].Parent; 12790 } 12791 return false; 12792 } 12793 12794 private: 12795 /// Pick a representative for a sequence. 12796 unsigned representative(unsigned K) { 12797 if (Values[K].Merged) 12798 // Perform path compression as we go. 12799 return Values[K].Parent = representative(Values[K].Parent); 12800 return K; 12801 } 12802 }; 12803 12804 /// An object for which we can track unsequenced uses. 12805 using Object = const NamedDecl *; 12806 12807 /// Different flavors of object usage which we track. We only track the 12808 /// least-sequenced usage of each kind. 12809 enum UsageKind { 12810 /// A read of an object. Multiple unsequenced reads are OK. 12811 UK_Use, 12812 12813 /// A modification of an object which is sequenced before the value 12814 /// computation of the expression, such as ++n in C++. 12815 UK_ModAsValue, 12816 12817 /// A modification of an object which is not sequenced before the value 12818 /// computation of the expression, such as n++. 12819 UK_ModAsSideEffect, 12820 12821 UK_Count = UK_ModAsSideEffect + 1 12822 }; 12823 12824 /// Bundle together a sequencing region and the expression corresponding 12825 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 12826 struct Usage { 12827 const Expr *UsageExpr; 12828 SequenceTree::Seq Seq; 12829 12830 Usage() : UsageExpr(nullptr), Seq() {} 12831 }; 12832 12833 struct UsageInfo { 12834 Usage Uses[UK_Count]; 12835 12836 /// Have we issued a diagnostic for this object already? 12837 bool Diagnosed; 12838 12839 UsageInfo() : Uses(), Diagnosed(false) {} 12840 }; 12841 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 12842 12843 Sema &SemaRef; 12844 12845 /// Sequenced regions within the expression. 12846 SequenceTree Tree; 12847 12848 /// Declaration modifications and references which we have seen. 12849 UsageInfoMap UsageMap; 12850 12851 /// The region we are currently within. 12852 SequenceTree::Seq Region; 12853 12854 /// Filled in with declarations which were modified as a side-effect 12855 /// (that is, post-increment operations). 12856 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 12857 12858 /// Expressions to check later. We defer checking these to reduce 12859 /// stack usage. 12860 SmallVectorImpl<const Expr *> &WorkList; 12861 12862 /// RAII object wrapping the visitation of a sequenced subexpression of an 12863 /// expression. At the end of this process, the side-effects of the evaluation 12864 /// become sequenced with respect to the value computation of the result, so 12865 /// we downgrade any UK_ModAsSideEffect within the evaluation to 12866 /// UK_ModAsValue. 
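/// For example, while visiting the left operand of a comma operator an "n++"
/// is recorded as a modification as side effect; once that operand has been
/// fully visited its side effect is sequenced before everything that follows,
/// so the usage is re-recorded as UK_ModAsValue.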
12867 struct SequencedSubexpression { 12868 SequencedSubexpression(SequenceChecker &Self) 12869 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 12870 Self.ModAsSideEffect = &ModAsSideEffect; 12871 } 12872 12873 ~SequencedSubexpression() { 12874 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 12875 // Add a new usage with usage kind UK_ModAsValue, and then restore 12876 // the previous usage with UK_ModAsSideEffect (thus clearing it if 12877 // the previous one was empty). 12878 UsageInfo &UI = Self.UsageMap[M.first]; 12879 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 12880 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 12881 SideEffectUsage = M.second; 12882 } 12883 Self.ModAsSideEffect = OldModAsSideEffect; 12884 } 12885 12886 SequenceChecker &Self; 12887 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 12888 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 12889 }; 12890 12891 /// RAII object wrapping the visitation of a subexpression which we might 12892 /// choose to evaluate as a constant. If any subexpression is evaluated and 12893 /// found to be non-constant, this allows us to suppress the evaluation of 12894 /// the outer expression. 12895 class EvaluationTracker { 12896 public: 12897 EvaluationTracker(SequenceChecker &Self) 12898 : Self(Self), Prev(Self.EvalTracker) { 12899 Self.EvalTracker = this; 12900 } 12901 12902 ~EvaluationTracker() { 12903 Self.EvalTracker = Prev; 12904 if (Prev) 12905 Prev->EvalOK &= EvalOK; 12906 } 12907 12908 bool evaluate(const Expr *E, bool &Result) { 12909 if (!EvalOK || E->isValueDependent()) 12910 return false; 12911 EvalOK = E->EvaluateAsBooleanCondition( 12912 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 12913 return EvalOK; 12914 } 12915 12916 private: 12917 SequenceChecker &Self; 12918 EvaluationTracker *Prev; 12919 bool EvalOK = true; 12920 } *EvalTracker = nullptr; 12921 12922 /// Find the object which is produced by the specified expression, 12923 /// if any. 12924 Object getObject(const Expr *E, bool Mod) const { 12925 E = E->IgnoreParenCasts(); 12926 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 12927 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 12928 return getObject(UO->getSubExpr(), Mod); 12929 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 12930 if (BO->getOpcode() == BO_Comma) 12931 return getObject(BO->getRHS(), Mod); 12932 if (Mod && BO->isAssignmentOp()) 12933 return getObject(BO->getLHS(), Mod); 12934 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 12935 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 12936 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 12937 return ME->getMemberDecl(); 12938 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12939 // FIXME: If this is a reference, map through to its value. 12940 return DRE->getDecl(); 12941 return nullptr; 12942 } 12943 12944 /// Note that an object \p O was modified or used by an expression 12945 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 12946 /// the object \p O as obtained via the \p UsageMap. 12947 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 12948 // Get the old usage for the given object and usage kind. 
12949 Usage &U = UI.Uses[UK];
12950 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
12951 // If we have a modification as side effect and are in a sequenced
12952 // subexpression, save the old Usage so that we can restore it later
12953 // in SequencedSubexpression::~SequencedSubexpression.
12954 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
12955 ModAsSideEffect->push_back(std::make_pair(O, U));
12956 // Then record the new usage with the current sequencing region.
12957 U.UsageExpr = UsageExpr;
12958 U.Seq = Region;
12959 }
12960 }
12961
12962 /// Check whether a modification or use of an object \p O in an expression
12963 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
12964 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
12965 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
12966 /// usage and false when we are checking for a mod-use unsequenced usage.
12967 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
12968 UsageKind OtherKind, bool IsModMod) {
12969 if (UI.Diagnosed)
12970 return;
12971
12972 const Usage &U = UI.Uses[OtherKind];
12973 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
12974 return;
12975
12976 const Expr *Mod = U.UsageExpr;
12977 const Expr *ModOrUse = UsageExpr;
12978 if (OtherKind == UK_Use)
12979 std::swap(Mod, ModOrUse);
12980
12981 SemaRef.DiagRuntimeBehavior(
12982 Mod->getExprLoc(), {Mod, ModOrUse},
12983 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
12984 : diag::warn_unsequenced_mod_use)
12985 << O << SourceRange(ModOrUse->getExprLoc()));
12986 UI.Diagnosed = true;
12987 }
12988
12989 // A note on note{Pre, Post}{Use, Mod}:
12990 //
12991 // (It helps to follow the algorithm with an expression such as
12992 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
12993 // operations before C++17 and both are well-defined in C++17).
12994 //
12995 // When visiting a node which uses/modifies an object, we first call notePreUse
12996 // or notePreMod before visiting its sub-expression(s). At this point the
12997 // children of the current node have not yet been visited and so the eventual
12998 // uses/modifications resulting from the children of the current node have not
12999 // been recorded yet.
13000 //
13001 // We then visit the children of the current node. After that notePostUse or
13002 // notePostMod is called. These will 1) detect an unsequenced modification
13003 // as side effect (as in "k++ + k") and 2) add a new usage with the
13004 // appropriate usage kind.
13005 //
13006 // We also have to be careful because some operations themselves sequence their
13007 // operands' side effects (for example: || or ,). To account for this we wrap
13008 // the visitation of such a sub-expression (for example: the LHS of || or ,)
13009 // with SequencedSubexpression. SequencedSubexpression is an RAII object
13010 // which records usages which are modifications as side effects, and then
13011 // downgrades them (or more accurately restores the previous usage, which was a
13012 // modification as side effect) when exiting the scope of the sequenced
13013 // subexpression.
13014
13015 void notePreUse(Object O, const Expr *UseExpr) {
13016 UsageInfo &UI = UsageMap[O];
13017 // Uses conflict with other modifications.
13018 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 13019 } 13020 13021 void notePostUse(Object O, const Expr *UseExpr) { 13022 UsageInfo &UI = UsageMap[O]; 13023 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 13024 /*IsModMod=*/false); 13025 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 13026 } 13027 13028 void notePreMod(Object O, const Expr *ModExpr) { 13029 UsageInfo &UI = UsageMap[O]; 13030 // Modifications conflict with other modifications and with uses. 13031 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 13032 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 13033 } 13034 13035 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 13036 UsageInfo &UI = UsageMap[O]; 13037 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 13038 /*IsModMod=*/true); 13039 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 13040 } 13041 13042 public: 13043 SequenceChecker(Sema &S, const Expr *E, 13044 SmallVectorImpl<const Expr *> &WorkList) 13045 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 13046 Visit(E); 13047 // Silence a -Wunused-private-field since WorkList is now unused. 13048 // TODO: Evaluate if it can be used, and if not remove it. 13049 (void)this->WorkList; 13050 } 13051 13052 void VisitStmt(const Stmt *S) { 13053 // Skip all statements which aren't expressions for now. 13054 } 13055 13056 void VisitExpr(const Expr *E) { 13057 // By default, just recurse to evaluated subexpressions. 13058 Base::VisitStmt(E); 13059 } 13060 13061 void VisitCastExpr(const CastExpr *E) { 13062 Object O = Object(); 13063 if (E->getCastKind() == CK_LValueToRValue) 13064 O = getObject(E->getSubExpr(), false); 13065 13066 if (O) 13067 notePreUse(O, E); 13068 VisitExpr(E); 13069 if (O) 13070 notePostUse(O, E); 13071 } 13072 13073 void VisitSequencedExpressions(const Expr *SequencedBefore, 13074 const Expr *SequencedAfter) { 13075 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 13076 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 13077 SequenceTree::Seq OldRegion = Region; 13078 13079 { 13080 SequencedSubexpression SeqBefore(*this); 13081 Region = BeforeRegion; 13082 Visit(SequencedBefore); 13083 } 13084 13085 Region = AfterRegion; 13086 Visit(SequencedAfter); 13087 13088 Region = OldRegion; 13089 13090 Tree.merge(BeforeRegion); 13091 Tree.merge(AfterRegion); 13092 } 13093 13094 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 13095 // C++17 [expr.sub]p1: 13096 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 13097 // expression E1 is sequenced before the expression E2. 13098 if (SemaRef.getLangOpts().CPlusPlus17) 13099 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 13100 else { 13101 Visit(ASE->getLHS()); 13102 Visit(ASE->getRHS()); 13103 } 13104 } 13105 13106 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13107 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13108 void VisitBinPtrMem(const BinaryOperator *BO) { 13109 // C++17 [expr.mptr.oper]p4: 13110 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 13111 // the expression E1 is sequenced before the expression E2. 
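// Before C++17 no such ordering is specified, so both operands are visited in
// the current (shared) region.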
13112 if (SemaRef.getLangOpts().CPlusPlus17) 13113 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13114 else { 13115 Visit(BO->getLHS()); 13116 Visit(BO->getRHS()); 13117 } 13118 } 13119 13120 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13121 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13122 void VisitBinShlShr(const BinaryOperator *BO) { 13123 // C++17 [expr.shift]p4: 13124 // The expression E1 is sequenced before the expression E2. 13125 if (SemaRef.getLangOpts().CPlusPlus17) 13126 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13127 else { 13128 Visit(BO->getLHS()); 13129 Visit(BO->getRHS()); 13130 } 13131 } 13132 13133 void VisitBinComma(const BinaryOperator *BO) { 13134 // C++11 [expr.comma]p1: 13135 // Every value computation and side effect associated with the left 13136 // expression is sequenced before every value computation and side 13137 // effect associated with the right expression. 13138 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13139 } 13140 13141 void VisitBinAssign(const BinaryOperator *BO) { 13142 SequenceTree::Seq RHSRegion; 13143 SequenceTree::Seq LHSRegion; 13144 if (SemaRef.getLangOpts().CPlusPlus17) { 13145 RHSRegion = Tree.allocate(Region); 13146 LHSRegion = Tree.allocate(Region); 13147 } else { 13148 RHSRegion = Region; 13149 LHSRegion = Region; 13150 } 13151 SequenceTree::Seq OldRegion = Region; 13152 13153 // C++11 [expr.ass]p1: 13154 // [...] the assignment is sequenced after the value computation 13155 // of the right and left operands, [...] 13156 // 13157 // so check it before inspecting the operands and update the 13158 // map afterwards. 13159 Object O = getObject(BO->getLHS(), /*Mod=*/true); 13160 if (O) 13161 notePreMod(O, BO); 13162 13163 if (SemaRef.getLangOpts().CPlusPlus17) { 13164 // C++17 [expr.ass]p1: 13165 // [...] The right operand is sequenced before the left operand. [...] 13166 { 13167 SequencedSubexpression SeqBefore(*this); 13168 Region = RHSRegion; 13169 Visit(BO->getRHS()); 13170 } 13171 13172 Region = LHSRegion; 13173 Visit(BO->getLHS()); 13174 13175 if (O && isa<CompoundAssignOperator>(BO)) 13176 notePostUse(O, BO); 13177 13178 } else { 13179 // C++11 does not specify any sequencing between the LHS and RHS. 13180 Region = LHSRegion; 13181 Visit(BO->getLHS()); 13182 13183 if (O && isa<CompoundAssignOperator>(BO)) 13184 notePostUse(O, BO); 13185 13186 Region = RHSRegion; 13187 Visit(BO->getRHS()); 13188 } 13189 13190 // C++11 [expr.ass]p1: 13191 // the assignment is sequenced [...] before the value computation of the 13192 // assignment expression. 13193 // C11 6.5.16/3 has no such rule. 13194 Region = OldRegion; 13195 if (O) 13196 notePostMod(O, BO, 13197 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 13198 : UK_ModAsSideEffect); 13199 if (SemaRef.getLangOpts().CPlusPlus17) { 13200 Tree.merge(RHSRegion); 13201 Tree.merge(LHSRegion); 13202 } 13203 } 13204 13205 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 13206 VisitBinAssign(CAO); 13207 } 13208 13209 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13210 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 13211 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 13212 Object O = getObject(UO->getSubExpr(), true); 13213 if (!O) 13214 return VisitExpr(UO); 13215 13216 notePreMod(O, UO); 13217 Visit(UO->getSubExpr()); 13218 // C++11 [expr.pre.incr]p1: 13219 // the expression ++x is equivalent to x+=1 13220 notePostMod(O, UO, 13221 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 13222 : UK_ModAsSideEffect); 13223 } 13224 13225 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13226 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 13227 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 13228 Object O = getObject(UO->getSubExpr(), true); 13229 if (!O) 13230 return VisitExpr(UO); 13231 13232 notePreMod(O, UO); 13233 Visit(UO->getSubExpr()); 13234 notePostMod(O, UO, UK_ModAsSideEffect); 13235 } 13236 13237 void VisitBinLOr(const BinaryOperator *BO) { 13238 // C++11 [expr.log.or]p2: 13239 // If the second expression is evaluated, every value computation and 13240 // side effect associated with the first expression is sequenced before 13241 // every value computation and side effect associated with the 13242 // second expression. 13243 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13244 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13245 SequenceTree::Seq OldRegion = Region; 13246 13247 EvaluationTracker Eval(*this); 13248 { 13249 SequencedSubexpression Sequenced(*this); 13250 Region = LHSRegion; 13251 Visit(BO->getLHS()); 13252 } 13253 13254 // C++11 [expr.log.or]p1: 13255 // [...] the second operand is not evaluated if the first operand 13256 // evaluates to true. 13257 bool EvalResult = false; 13258 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 13259 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 13260 if (ShouldVisitRHS) { 13261 Region = RHSRegion; 13262 Visit(BO->getRHS()); 13263 } 13264 13265 Region = OldRegion; 13266 Tree.merge(LHSRegion); 13267 Tree.merge(RHSRegion); 13268 } 13269 13270 void VisitBinLAnd(const BinaryOperator *BO) { 13271 // C++11 [expr.log.and]p2: 13272 // If the second expression is evaluated, every value computation and 13273 // side effect associated with the first expression is sequenced before 13274 // every value computation and side effect associated with the 13275 // second expression. 13276 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 13277 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 13278 SequenceTree::Seq OldRegion = Region; 13279 13280 EvaluationTracker Eval(*this); 13281 { 13282 SequencedSubexpression Sequenced(*this); 13283 Region = LHSRegion; 13284 Visit(BO->getLHS()); 13285 } 13286 13287 // C++11 [expr.log.and]p1: 13288 // [...] the second operand is not evaluated if the first operand is false. 
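// If the LHS is known to evaluate to false, the RHS is not evaluated, so don't
// visit it; this avoids diagnosing unsequenced operations in code that never
// runs.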
13289 bool EvalResult = false;
13290 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
13291 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
13292 if (ShouldVisitRHS) {
13293 Region = RHSRegion;
13294 Visit(BO->getRHS());
13295 }
13296
13297 Region = OldRegion;
13298 Tree.merge(LHSRegion);
13299 Tree.merge(RHSRegion);
13300 }
13301
13302 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
13303 // C++11 [expr.cond]p1:
13304 // [...] Every value computation and side effect associated with the first
13305 // expression is sequenced before every value computation and side effect
13306 // associated with the second or third expression.
13307 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
13308
13309 // No sequencing is specified between the true and false expressions.
13310 // However, since exactly one of the two is going to be evaluated we can
13311 // consider them to be sequenced. This is needed to avoid warning on
13312 // something like "x ? y += 1 : y += 2;" in the case where we will visit
13313 // both the true and false expressions because we can't evaluate x.
13314 // This will still allow us to detect an expression like (pre C++17)
13315 // "(x ? y += 1 : y += 2) = y".
13316 //
13317 // We don't wrap the visitation of the true and false expressions with
13318 // SequencedSubexpression because we don't want to downgrade modifications
13319 // as side effects in the true and false expressions after the visitation
13320 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
13321 // not warn between the two "y++", but we should warn between the "y++"
13322 // and the "y".)
13323 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
13324 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
13325 SequenceTree::Seq OldRegion = Region;
13326
13327 EvaluationTracker Eval(*this);
13328 {
13329 SequencedSubexpression Sequenced(*this);
13330 Region = ConditionRegion;
13331 Visit(CO->getCond());
13332 }
13333
13334 // C++11 [expr.cond]p1:
13335 // [...] The first expression is contextually converted to bool (Clause 4).
13336 // It is evaluated and if it is true, the result of the conditional
13337 // expression is the value of the second expression, otherwise that of the
13338 // third expression. Only one of the second and third expressions is
13339 // evaluated. [...]
13340 bool EvalResult = false;
13341 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
13342 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
13343 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
13344 if (ShouldVisitTrueExpr) {
13345 Region = TrueRegion;
13346 Visit(CO->getTrueExpr());
13347 }
13348 if (ShouldVisitFalseExpr) {
13349 Region = FalseRegion;
13350 Visit(CO->getFalseExpr());
13351 }
13352
13353 Region = OldRegion;
13354 Tree.merge(ConditionRegion);
13355 Tree.merge(TrueRegion);
13356 Tree.merge(FalseRegion);
13357 }
13358
13359 void VisitCallExpr(const CallExpr *CE) {
13360 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
13361
13362 if (CE->isUnevaluatedBuiltinCall(Context))
13363 return;
13364
13365 // C++11 [intro.execution]p15:
13366 // When calling a function [...], every value computation and side effect
13367 // associated with any argument expression, or with the postfix expression
13368 // designating the called function, is sequenced before execution of every
13369 // expression or statement in the body of the function [and thus before
13370 // the value computation of its result].
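// Wrapping the visit in SequencedSubexpression models this: modifications made
// while evaluating the arguments are treated as sequenced once the call itself
// has been evaluated.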
13371 SequencedSubexpression Sequenced(*this); 13372 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 13373 // C++17 [expr.call]p5 13374 // The postfix-expression is sequenced before each expression in the 13375 // expression-list and any default argument. [...] 13376 SequenceTree::Seq CalleeRegion; 13377 SequenceTree::Seq OtherRegion; 13378 if (SemaRef.getLangOpts().CPlusPlus17) { 13379 CalleeRegion = Tree.allocate(Region); 13380 OtherRegion = Tree.allocate(Region); 13381 } else { 13382 CalleeRegion = Region; 13383 OtherRegion = Region; 13384 } 13385 SequenceTree::Seq OldRegion = Region; 13386 13387 // Visit the callee expression first. 13388 Region = CalleeRegion; 13389 if (SemaRef.getLangOpts().CPlusPlus17) { 13390 SequencedSubexpression Sequenced(*this); 13391 Visit(CE->getCallee()); 13392 } else { 13393 Visit(CE->getCallee()); 13394 } 13395 13396 // Then visit the argument expressions. 13397 Region = OtherRegion; 13398 for (const Expr *Argument : CE->arguments()) 13399 Visit(Argument); 13400 13401 Region = OldRegion; 13402 if (SemaRef.getLangOpts().CPlusPlus17) { 13403 Tree.merge(CalleeRegion); 13404 Tree.merge(OtherRegion); 13405 } 13406 }); 13407 } 13408 13409 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 13410 // C++17 [over.match.oper]p2: 13411 // [...] the operator notation is first transformed to the equivalent 13412 // function-call notation as summarized in Table 12 (where @ denotes one 13413 // of the operators covered in the specified subclause). However, the 13414 // operands are sequenced in the order prescribed for the built-in 13415 // operator (Clause 8). 13416 // 13417 // From the above only overloaded binary operators and overloaded call 13418 // operators have sequencing rules in C++17 that we need to handle 13419 // separately. 13420 if (!SemaRef.getLangOpts().CPlusPlus17 || 13421 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 13422 return VisitCallExpr(CXXOCE); 13423 13424 enum { 13425 NoSequencing, 13426 LHSBeforeRHS, 13427 RHSBeforeLHS, 13428 LHSBeforeRest 13429 } SequencingKind; 13430 switch (CXXOCE->getOperator()) { 13431 case OO_Equal: 13432 case OO_PlusEqual: 13433 case OO_MinusEqual: 13434 case OO_StarEqual: 13435 case OO_SlashEqual: 13436 case OO_PercentEqual: 13437 case OO_CaretEqual: 13438 case OO_AmpEqual: 13439 case OO_PipeEqual: 13440 case OO_LessLessEqual: 13441 case OO_GreaterGreaterEqual: 13442 SequencingKind = RHSBeforeLHS; 13443 break; 13444 13445 case OO_LessLess: 13446 case OO_GreaterGreater: 13447 case OO_AmpAmp: 13448 case OO_PipePipe: 13449 case OO_Comma: 13450 case OO_ArrowStar: 13451 case OO_Subscript: 13452 SequencingKind = LHSBeforeRHS; 13453 break; 13454 13455 case OO_Call: 13456 SequencingKind = LHSBeforeRest; 13457 break; 13458 13459 default: 13460 SequencingKind = NoSequencing; 13461 break; 13462 } 13463 13464 if (SequencingKind == NoSequencing) 13465 return VisitCallExpr(CXXOCE); 13466 13467 // This is a call, so all subexpressions are sequenced before the result. 
13468 SequencedSubexpression Sequenced(*this); 13469 13470 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 13471 assert(SemaRef.getLangOpts().CPlusPlus17 && 13472 "Should only get there with C++17 and above!"); 13473 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 13474 "Should only get there with an overloaded binary operator" 13475 " or an overloaded call operator!"); 13476 13477 if (SequencingKind == LHSBeforeRest) { 13478 assert(CXXOCE->getOperator() == OO_Call && 13479 "We should only have an overloaded call operator here!"); 13480 13481 // This is very similar to VisitCallExpr, except that we only have the 13482 // C++17 case. The postfix-expression is the first argument of the 13483 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 13484 // are in the following arguments. 13485 // 13486 // Note that we intentionally do not visit the callee expression since 13487 // it is just a decayed reference to a function. 13488 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 13489 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 13490 SequenceTree::Seq OldRegion = Region; 13491 13492 assert(CXXOCE->getNumArgs() >= 1 && 13493 "An overloaded call operator must have at least one argument" 13494 " for the postfix-expression!"); 13495 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 13496 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 13497 CXXOCE->getNumArgs() - 1); 13498 13499 // Visit the postfix-expression first. 13500 { 13501 Region = PostfixExprRegion; 13502 SequencedSubexpression Sequenced(*this); 13503 Visit(PostfixExpr); 13504 } 13505 13506 // Then visit the argument expressions. 13507 Region = ArgsRegion; 13508 for (const Expr *Arg : Args) 13509 Visit(Arg); 13510 13511 Region = OldRegion; 13512 Tree.merge(PostfixExprRegion); 13513 Tree.merge(ArgsRegion); 13514 } else { 13515 assert(CXXOCE->getNumArgs() == 2 && 13516 "Should only have two arguments here!"); 13517 assert((SequencingKind == LHSBeforeRHS || 13518 SequencingKind == RHSBeforeLHS) && 13519 "Unexpected sequencing kind!"); 13520 13521 // We do not visit the callee expression since it is just a decayed 13522 // reference to a function. 13523 const Expr *E1 = CXXOCE->getArg(0); 13524 const Expr *E2 = CXXOCE->getArg(1); 13525 if (SequencingKind == RHSBeforeLHS) 13526 std::swap(E1, E2); 13527 13528 return VisitSequencedExpressions(E1, E2); 13529 } 13530 }); 13531 } 13532 13533 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 13534 // This is a call, so all subexpressions are sequenced before the result. 13535 SequencedSubexpression Sequenced(*this); 13536 13537 if (!CCE->isListInitialization()) 13538 return VisitExpr(CCE); 13539 13540 // In C++11, list initializations are sequenced. 13541 SmallVector<SequenceTree::Seq, 32> Elts; 13542 SequenceTree::Seq Parent = Region; 13543 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 13544 E = CCE->arg_end(); 13545 I != E; ++I) { 13546 Region = Tree.allocate(Parent); 13547 Elts.push_back(Region); 13548 Visit(*I); 13549 } 13550 13551 // Forget that the initializers are sequenced. 13552 Region = Parent; 13553 for (unsigned I = 0; I < Elts.size(); ++I) 13554 Tree.merge(Elts[I]); 13555 } 13556 13557 void VisitInitListExpr(const InitListExpr *ILE) { 13558 if (!SemaRef.getLangOpts().CPlusPlus11) 13559 return VisitExpr(ILE); 13560 13561 // In C++11, list initializations are sequenced. 
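// e.g. "int a[] = {i++, i++};" is fine in C++11 because each initializer is
// sequenced before the next, so the two increments do not conflict.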
13562 SmallVector<SequenceTree::Seq, 32> Elts; 13563 SequenceTree::Seq Parent = Region; 13564 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 13565 const Expr *E = ILE->getInit(I); 13566 if (!E) 13567 continue; 13568 Region = Tree.allocate(Parent); 13569 Elts.push_back(Region); 13570 Visit(E); 13571 } 13572 13573 // Forget that the initializers are sequenced. 13574 Region = Parent; 13575 for (unsigned I = 0; I < Elts.size(); ++I) 13576 Tree.merge(Elts[I]); 13577 } 13578 }; 13579 13580 } // namespace 13581 13582 void Sema::CheckUnsequencedOperations(const Expr *E) { 13583 SmallVector<const Expr *, 8> WorkList; 13584 WorkList.push_back(E); 13585 while (!WorkList.empty()) { 13586 const Expr *Item = WorkList.pop_back_val(); 13587 SequenceChecker(*this, Item, WorkList); 13588 } 13589 } 13590 13591 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 13592 bool IsConstexpr) { 13593 llvm::SaveAndRestore<bool> ConstantContext( 13594 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 13595 CheckImplicitConversions(E, CheckLoc); 13596 if (!E->isInstantiationDependent()) 13597 CheckUnsequencedOperations(E); 13598 if (!IsConstexpr && !E->isValueDependent()) 13599 CheckForIntOverflow(E); 13600 DiagnoseMisalignedMembers(); 13601 } 13602 13603 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 13604 FieldDecl *BitField, 13605 Expr *Init) { 13606 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 13607 } 13608 13609 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 13610 SourceLocation Loc) { 13611 if (!PType->isVariablyModifiedType()) 13612 return; 13613 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 13614 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 13615 return; 13616 } 13617 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 13618 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 13619 return; 13620 } 13621 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 13622 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 13623 return; 13624 } 13625 13626 const ArrayType *AT = S.Context.getAsArrayType(PType); 13627 if (!AT) 13628 return; 13629 13630 if (AT->getSizeModifier() != ArrayType::Star) { 13631 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 13632 return; 13633 } 13634 13635 S.Diag(Loc, diag::err_array_star_in_function_definition); 13636 } 13637 13638 /// CheckParmsForFunctionDef - Check that the parameters of the given 13639 /// function are appropriate for the definition of a function. This 13640 /// takes care of any checks that cannot be performed on the 13641 /// declaration itself, e.g., that the types of each of the function 13642 /// parameters are complete. 13643 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 13644 bool CheckParameterNames) { 13645 bool HasInvalidParm = false; 13646 for (ParmVarDecl *Param : Parameters) { 13647 // C99 6.7.5.3p4: the parameters in a parameter type list in a 13648 // function declarator that is part of a function definition of 13649 // that function shall not have incomplete type. 13650 // 13651 // This is also C++ [dcl.fct]p6. 
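// Requiring the complete type here catches, for example, a by-value parameter
// whose class type has only been forward-declared at the point of definition.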
13652 if (!Param->isInvalidDecl() && 13653 RequireCompleteType(Param->getLocation(), Param->getType(), 13654 diag::err_typecheck_decl_incomplete_type)) { 13655 Param->setInvalidDecl(); 13656 HasInvalidParm = true; 13657 } 13658 13659 // C99 6.9.1p5: If the declarator includes a parameter type list, the 13660 // declaration of each parameter shall include an identifier. 13661 if (CheckParameterNames && Param->getIdentifier() == nullptr && 13662 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 13663 // Diagnose this as an extension in C17 and earlier. 13664 if (!getLangOpts().C2x) 13665 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 13666 } 13667 13668 // C99 6.7.5.3p12: 13669 // If the function declarator is not part of a definition of that 13670 // function, parameters may have incomplete type and may use the [*] 13671 // notation in their sequences of declarator specifiers to specify 13672 // variable length array types. 13673 QualType PType = Param->getOriginalType(); 13674 // FIXME: This diagnostic should point the '[*]' if source-location 13675 // information is added for it. 13676 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 13677 13678 // If the parameter is a c++ class type and it has to be destructed in the 13679 // callee function, declare the destructor so that it can be called by the 13680 // callee function. Do not perform any direct access check on the dtor here. 13681 if (!Param->isInvalidDecl()) { 13682 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 13683 if (!ClassDecl->isInvalidDecl() && 13684 !ClassDecl->hasIrrelevantDestructor() && 13685 !ClassDecl->isDependentContext() && 13686 ClassDecl->isParamDestroyedInCallee()) { 13687 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 13688 MarkFunctionReferenced(Param->getLocation(), Destructor); 13689 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 13690 } 13691 } 13692 } 13693 13694 // Parameters with the pass_object_size attribute only need to be marked 13695 // constant at function definitions. Because we lack information about 13696 // whether we're on a declaration or definition when we're instantiating the 13697 // attribute, we need to check for constness here. 13698 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 13699 if (!Param->getType().isConstQualified()) 13700 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 13701 << Attr->getSpelling() << 1; 13702 13703 // Check for parameter names shadowing fields from the class. 13704 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 13705 // The owning context for the parameter should be the function, but we 13706 // want to see if this function's declaration context is a record. 13707 DeclContext *DC = Param->getDeclContext(); 13708 if (DC && DC->isFunctionOrMethod()) { 13709 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 13710 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 13711 RD, /*DeclIsField*/ false); 13712 } 13713 } 13714 } 13715 13716 return HasInvalidParm; 13717 } 13718 13719 Optional<std::pair<CharUnits, CharUnits>> 13720 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 13721 13722 /// Compute the alignment and offset of the base class object given the 13723 /// derived-to-base cast expression and the alignment and offset of the derived 13724 /// class object. 
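/// For a virtual base the offset is conservatively reset to zero and the
/// alignment is capped at the base's non-virtual alignment, since the
/// placement of a virtual base within the complete object is not known here.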
13725 static std::pair<CharUnits, CharUnits> 13726 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 13727 CharUnits BaseAlignment, CharUnits Offset, 13728 ASTContext &Ctx) { 13729 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 13730 ++PathI) { 13731 const CXXBaseSpecifier *Base = *PathI; 13732 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 13733 if (Base->isVirtual()) { 13734 // The complete object may have a lower alignment than the non-virtual 13735 // alignment of the base, in which case the base may be misaligned. Choose 13736 // the smaller of the non-virtual alignment and BaseAlignment, which is a 13737 // conservative lower bound of the complete object alignment. 13738 CharUnits NonVirtualAlignment = 13739 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 13740 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 13741 Offset = CharUnits::Zero(); 13742 } else { 13743 const ASTRecordLayout &RL = 13744 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 13745 Offset += RL.getBaseClassOffset(BaseDecl); 13746 } 13747 DerivedType = Base->getType(); 13748 } 13749 13750 return std::make_pair(BaseAlignment, Offset); 13751 } 13752 13753 /// Compute the alignment and offset of a binary additive operator. 13754 static Optional<std::pair<CharUnits, CharUnits>> 13755 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 13756 bool IsSub, ASTContext &Ctx) { 13757 QualType PointeeType = PtrE->getType()->getPointeeType(); 13758 13759 if (!PointeeType->isConstantSizeType()) 13760 return llvm::None; 13761 13762 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 13763 13764 if (!P) 13765 return llvm::None; 13766 13767 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 13768 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 13769 CharUnits Offset = EltSize * IdxRes->getExtValue(); 13770 if (IsSub) 13771 Offset = -Offset; 13772 return std::make_pair(P->first, P->second + Offset); 13773 } 13774 13775 // If the integer expression isn't a constant expression, compute the lower 13776 // bound of the alignment using the alignment and offset of the pointer 13777 // expression and the element size. 13778 return std::make_pair( 13779 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 13780 CharUnits::Zero()); 13781 } 13782 13783 /// This helper function takes an lvalue expression and returns the alignment of 13784 /// a VarDecl and a constant offset from the VarDecl. 
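/// Returns llvm::None when the expression cannot be traced back to such a
/// VarDecl.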
13785 Optional<std::pair<CharUnits, CharUnits>> 13786 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 13787 E = E->IgnoreParens(); 13788 switch (E->getStmtClass()) { 13789 default: 13790 break; 13791 case Stmt::CStyleCastExprClass: 13792 case Stmt::CXXStaticCastExprClass: 13793 case Stmt::ImplicitCastExprClass: { 13794 auto *CE = cast<CastExpr>(E); 13795 const Expr *From = CE->getSubExpr(); 13796 switch (CE->getCastKind()) { 13797 default: 13798 break; 13799 case CK_NoOp: 13800 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13801 case CK_UncheckedDerivedToBase: 13802 case CK_DerivedToBase: { 13803 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13804 if (!P) 13805 break; 13806 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 13807 P->second, Ctx); 13808 } 13809 } 13810 break; 13811 } 13812 case Stmt::ArraySubscriptExprClass: { 13813 auto *ASE = cast<ArraySubscriptExpr>(E); 13814 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 13815 false, Ctx); 13816 } 13817 case Stmt::DeclRefExprClass: { 13818 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 13819 // FIXME: If VD is captured by copy or is an escaping __block variable, 13820 // use the alignment of VD's type. 13821 if (!VD->getType()->isReferenceType()) 13822 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 13823 if (VD->hasInit()) 13824 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 13825 } 13826 break; 13827 } 13828 case Stmt::MemberExprClass: { 13829 auto *ME = cast<MemberExpr>(E); 13830 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 13831 if (!FD || FD->getType()->isReferenceType()) 13832 break; 13833 Optional<std::pair<CharUnits, CharUnits>> P; 13834 if (ME->isArrow()) 13835 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 13836 else 13837 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 13838 if (!P) 13839 break; 13840 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 13841 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 13842 return std::make_pair(P->first, 13843 P->second + CharUnits::fromQuantity(Offset)); 13844 } 13845 case Stmt::UnaryOperatorClass: { 13846 auto *UO = cast<UnaryOperator>(E); 13847 switch (UO->getOpcode()) { 13848 default: 13849 break; 13850 case UO_Deref: 13851 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 13852 } 13853 break; 13854 } 13855 case Stmt::BinaryOperatorClass: { 13856 auto *BO = cast<BinaryOperator>(E); 13857 auto Opcode = BO->getOpcode(); 13858 switch (Opcode) { 13859 default: 13860 break; 13861 case BO_Comma: 13862 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 13863 } 13864 break; 13865 } 13866 } 13867 return llvm::None; 13868 } 13869 13870 /// This helper function takes a pointer expression and returns the alignment of 13871 /// a VarDecl and a constant offset from the VarDecl. 
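/// For a 'this' expression the non-virtual alignment of the pointee class is
/// used; llvm::None is returned when no underlying object can be identified.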
13872 Optional<std::pair<CharUnits, CharUnits>> 13873 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 13874 E = E->IgnoreParens(); 13875 switch (E->getStmtClass()) { 13876 default: 13877 break; 13878 case Stmt::CStyleCastExprClass: 13879 case Stmt::CXXStaticCastExprClass: 13880 case Stmt::ImplicitCastExprClass: { 13881 auto *CE = cast<CastExpr>(E); 13882 const Expr *From = CE->getSubExpr(); 13883 switch (CE->getCastKind()) { 13884 default: 13885 break; 13886 case CK_NoOp: 13887 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13888 case CK_ArrayToPointerDecay: 13889 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 13890 case CK_UncheckedDerivedToBase: 13891 case CK_DerivedToBase: { 13892 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 13893 if (!P) 13894 break; 13895 return getDerivedToBaseAlignmentAndOffset( 13896 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 13897 } 13898 } 13899 break; 13900 } 13901 case Stmt::CXXThisExprClass: { 13902 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 13903 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 13904 return std::make_pair(Alignment, CharUnits::Zero()); 13905 } 13906 case Stmt::UnaryOperatorClass: { 13907 auto *UO = cast<UnaryOperator>(E); 13908 if (UO->getOpcode() == UO_AddrOf) 13909 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 13910 break; 13911 } 13912 case Stmt::BinaryOperatorClass: { 13913 auto *BO = cast<BinaryOperator>(E); 13914 auto Opcode = BO->getOpcode(); 13915 switch (Opcode) { 13916 default: 13917 break; 13918 case BO_Add: 13919 case BO_Sub: { 13920 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 13921 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 13922 std::swap(LHS, RHS); 13923 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 13924 Ctx); 13925 } 13926 case BO_Comma: 13927 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 13928 } 13929 break; 13930 } 13931 } 13932 return llvm::None; 13933 } 13934 13935 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 13936 // See if we can compute the alignment of a VarDecl and an offset from it. 13937 Optional<std::pair<CharUnits, CharUnits>> P = 13938 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 13939 13940 if (P) 13941 return P->first.alignmentAtOffset(P->second); 13942 13943 // If that failed, return the type's alignment. 13944 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 13945 } 13946 13947 /// CheckCastAlign - Implements -Wcast-align, which warns when a 13948 /// pointer cast increases the alignment requirements. 13949 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 13950 // This is actually a lot of work to potentially be doing on every 13951 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 13952 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 13953 return; 13954 13955 // Ignore dependent types. 13956 if (T->isDependentType() || Op->getType()->isDependentType()) 13957 return; 13958 13959 // Require that the destination be a pointer type. 13960 const PointerType *DestPtr = T->getAs<PointerType>(); 13961 if (!DestPtr) return; 13962 13963 // If the destination has alignment 1, we're done. 
13964 QualType DestPointee = DestPtr->getPointeeType(); 13965 if (DestPointee->isIncompleteType()) return; 13966 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 13967 if (DestAlign.isOne()) return; 13968 13969 // Require that the source be a pointer type. 13970 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 13971 if (!SrcPtr) return; 13972 QualType SrcPointee = SrcPtr->getPointeeType(); 13973 13974 // Explicitly allow casts from cv void*. We already implicitly 13975 // allowed casts to cv void*, since they have alignment 1. 13976 // Also allow casts involving incomplete types, which implicitly 13977 // includes 'void'. 13978 if (SrcPointee->isIncompleteType()) return; 13979 13980 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 13981 13982 if (SrcAlign >= DestAlign) return; 13983 13984 Diag(TRange.getBegin(), diag::warn_cast_align) 13985 << Op->getType() << T 13986 << static_cast<unsigned>(SrcAlign.getQuantity()) 13987 << static_cast<unsigned>(DestAlign.getQuantity()) 13988 << TRange << Op->getSourceRange(); 13989 } 13990 13991 /// Check whether this array fits the idiom of a size-one tail padded 13992 /// array member of a struct. 13993 /// 13994 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 13995 /// commonly used to emulate flexible arrays in C89 code. 13996 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 13997 const NamedDecl *ND) { 13998 if (Size != 1 || !ND) return false; 13999 14000 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 14001 if (!FD) return false; 14002 14003 // Don't consider sizes resulting from macro expansions or template argument 14004 // substitution to form C89 tail-padded arrays. 14005 14006 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 14007 while (TInfo) { 14008 TypeLoc TL = TInfo->getTypeLoc(); 14009 // Look through typedefs. 14010 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 14011 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 14012 TInfo = TDL->getTypeSourceInfo(); 14013 continue; 14014 } 14015 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 14016 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 14017 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 14018 return false; 14019 } 14020 break; 14021 } 14022 14023 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 14024 if (!RD) return false; 14025 if (RD->isUnion()) return false; 14026 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 14027 if (!CRD->isStandardLayout()) return false; 14028 } 14029 14030 // See if this is the last field decl in the record. 14031 const Decl *D = FD; 14032 while ((D = D->getNextDeclInContext())) 14033 if (isa<FieldDecl>(D)) 14034 return false; 14035 return true; 14036 } 14037 14038 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 14039 const ArraySubscriptExpr *ASE, 14040 bool AllowOnePastEnd, bool IndexNegated) { 14041 // Already diagnosed by the constant evaluator. 
14042 if (isConstantEvaluated()) 14043 return; 14044 14045 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 14046 if (IndexExpr->isValueDependent()) 14047 return; 14048 14049 const Type *EffectiveType = 14050 BaseExpr->getType()->getPointeeOrArrayElementType(); 14051 BaseExpr = BaseExpr->IgnoreParenCasts(); 14052 const ConstantArrayType *ArrayTy = 14053 Context.getAsConstantArrayType(BaseExpr->getType()); 14054 14055 if (!ArrayTy) 14056 return; 14057 14058 const Type *BaseType = ArrayTy->getElementType().getTypePtr(); 14059 if (EffectiveType->isDependentType() || BaseType->isDependentType()) 14060 return; 14061 14062 Expr::EvalResult Result; 14063 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 14064 return; 14065 14066 llvm::APSInt index = Result.Val.getInt(); 14067 if (IndexNegated) 14068 index = -index; 14069 14070 const NamedDecl *ND = nullptr; 14071 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14072 ND = DRE->getDecl(); 14073 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14074 ND = ME->getMemberDecl(); 14075 14076 if (index.isUnsigned() || !index.isNegative()) { 14077 // It is possible that the type of the base expression after 14078 // IgnoreParenCasts is incomplete, even though the type of the base 14079 // expression before IgnoreParenCasts is complete (see PR39746 for an 14080 // example). In this case we have no information about whether the array 14081 // access exceeds the array bounds. However we can still diagnose an array 14082 // access which precedes the array bounds. 14083 if (BaseType->isIncompleteType()) 14084 return; 14085 14086 llvm::APInt size = ArrayTy->getSize(); 14087 if (!size.isStrictlyPositive()) 14088 return; 14089 14090 if (BaseType != EffectiveType) { 14091 // Make sure we're comparing apples to apples when comparing index to size 14092 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 14093 uint64_t array_typesize = Context.getTypeSize(BaseType); 14094 // Handle ptrarith_typesize being zero, such as when casting to void* 14095 if (!ptrarith_typesize) ptrarith_typesize = 1; 14096 if (ptrarith_typesize != array_typesize) { 14097 // There's a cast to a different size type involved 14098 uint64_t ratio = array_typesize / ptrarith_typesize; 14099 // TODO: Be smarter about handling cases where array_typesize is not a 14100 // multiple of ptrarith_typesize 14101 if (ptrarith_typesize * ratio == array_typesize) 14102 size *= llvm::APInt(size.getBitWidth(), ratio); 14103 } 14104 } 14105 14106 if (size.getBitWidth() > index.getBitWidth()) 14107 index = index.zext(size.getBitWidth()); 14108 else if (size.getBitWidth() < index.getBitWidth()) 14109 size = size.zext(index.getBitWidth()); 14110 14111 // For array subscripting the index must be less than size, but for pointer 14112 // arithmetic also allow the index (offset) to be equal to size since 14113 // computing the next address after the end of the array is legal and 14114 // commonly done e.g. in C++ iterators and range-based for loops. 14115 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 14116 return; 14117 14118 // Also don't warn for arrays of size 1 which are members of some 14119 // structure. These are often used to approximate flexible arrays in C89 14120 // code. 14121 if (IsTailPaddedMemberArray(*this, size, ND)) 14122 return; 14123 14124 // Suppress the warning if the subscript expression (as identified by the 14125 // ']' location) and the index expression are both from macro expansions 14126 // within a system header. 
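    // (Such macros typically originate in the system header itself, so the
    // apparent out-of-bounds index is not something the user can fix.)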
14127 if (ASE) { 14128 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 14129 ASE->getRBracketLoc()); 14130 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 14131 SourceLocation IndexLoc = 14132 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 14133 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 14134 return; 14135 } 14136 } 14137 14138 unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; 14139 if (ASE) 14140 DiagID = diag::warn_array_index_exceeds_bounds; 14141 14142 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14143 PDiag(DiagID) << index.toString(10, true) 14144 << size.toString(10, true) 14145 << (unsigned)size.getLimitedValue(~0U) 14146 << IndexExpr->getSourceRange()); 14147 } else { 14148 unsigned DiagID = diag::warn_array_index_precedes_bounds; 14149 if (!ASE) { 14150 DiagID = diag::warn_ptr_arith_precedes_bounds; 14151 if (index.isNegative()) index = -index; 14152 } 14153 14154 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14155 PDiag(DiagID) << index.toString(10, true) 14156 << IndexExpr->getSourceRange()); 14157 } 14158 14159 if (!ND) { 14160 // Try harder to find a NamedDecl to point at in the note. 14161 while (const ArraySubscriptExpr *ASE = 14162 dyn_cast<ArraySubscriptExpr>(BaseExpr)) 14163 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 14164 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14165 ND = DRE->getDecl(); 14166 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14167 ND = ME->getMemberDecl(); 14168 } 14169 14170 if (ND) 14171 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 14172 PDiag(diag::note_array_declared_here) << ND); 14173 } 14174 14175 void Sema::CheckArrayAccess(const Expr *expr) { 14176 int AllowOnePastEnd = 0; 14177 while (expr) { 14178 expr = expr->IgnoreParenImpCasts(); 14179 switch (expr->getStmtClass()) { 14180 case Stmt::ArraySubscriptExprClass: { 14181 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 14182 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 14183 AllowOnePastEnd > 0); 14184 expr = ASE->getBase(); 14185 break; 14186 } 14187 case Stmt::MemberExprClass: { 14188 expr = cast<MemberExpr>(expr)->getBase(); 14189 break; 14190 } 14191 case Stmt::OMPArraySectionExprClass: { 14192 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 14193 if (ASE->getLowerBound()) 14194 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 14195 /*ASE=*/nullptr, AllowOnePastEnd > 0); 14196 return; 14197 } 14198 case Stmt::UnaryOperatorClass: { 14199 // Only unwrap the * and & unary operators 14200 const UnaryOperator *UO = cast<UnaryOperator>(expr); 14201 expr = UO->getSubExpr(); 14202 switch (UO->getOpcode()) { 14203 case UO_AddrOf: 14204 AllowOnePastEnd++; 14205 break; 14206 case UO_Deref: 14207 AllowOnePastEnd--; 14208 break; 14209 default: 14210 return; 14211 } 14212 break; 14213 } 14214 case Stmt::ConditionalOperatorClass: { 14215 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 14216 if (const Expr *lhs = cond->getLHS()) 14217 CheckArrayAccess(lhs); 14218 if (const Expr *rhs = cond->getRHS()) 14219 CheckArrayAccess(rhs); 14220 return; 14221 } 14222 case Stmt::CXXOperatorCallExprClass: { 14223 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 14224 for (const auto *Arg : OCE->arguments()) 14225 CheckArrayAccess(Arg); 14226 return; 14227 } 14228 default: 14229 return; 14230 } 14231 } 14232 } 14233 14234 //===--- CHECK: Objective-C retain cycles ----------------------------------// 14235 14236 namespace { 14237 14238 struct RetainCycleOwner { 
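  // Describes the variable that (directly or indirectly) holds a strong
  // reference to the object being examined, and where that ownership was
  // established.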
14239 VarDecl *Variable = nullptr; 14240 SourceRange Range; 14241 SourceLocation Loc; 14242 bool Indirect = false; 14243 14244 RetainCycleOwner() = default; 14245 14246 void setLocsFrom(Expr *e) { 14247 Loc = e->getExprLoc(); 14248 Range = e->getSourceRange(); 14249 } 14250 }; 14251 14252 } // namespace 14253 14254 /// Consider whether capturing the given variable can possibly lead to 14255 /// a retain cycle. 14256 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 14257 // In ARC, it's captured strongly iff the variable has __strong 14258 // lifetime. In MRR, it's captured strongly if the variable is 14259 // __block and has an appropriate type. 14260 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14261 return false; 14262 14263 owner.Variable = var; 14264 if (ref) 14265 owner.setLocsFrom(ref); 14266 return true; 14267 } 14268 14269 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 14270 while (true) { 14271 e = e->IgnoreParens(); 14272 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 14273 switch (cast->getCastKind()) { 14274 case CK_BitCast: 14275 case CK_LValueBitCast: 14276 case CK_LValueToRValue: 14277 case CK_ARCReclaimReturnedObject: 14278 e = cast->getSubExpr(); 14279 continue; 14280 14281 default: 14282 return false; 14283 } 14284 } 14285 14286 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 14287 ObjCIvarDecl *ivar = ref->getDecl(); 14288 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 14289 return false; 14290 14291 // Try to find a retain cycle in the base. 14292 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 14293 return false; 14294 14295 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 14296 owner.Indirect = true; 14297 return true; 14298 } 14299 14300 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 14301 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 14302 if (!var) return false; 14303 return considerVariable(var, ref, owner); 14304 } 14305 14306 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 14307 if (member->isArrow()) return false; 14308 14309 // Don't count this as an indirect ownership. 14310 e = member->getBase(); 14311 continue; 14312 } 14313 14314 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 14315 // Only pay attention to pseudo-objects on property references. 14316 ObjCPropertyRefExpr *pre 14317 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 14318 ->IgnoreParens()); 14319 if (!pre) return false; 14320 if (pre->isImplicitProperty()) return false; 14321 ObjCPropertyDecl *property = pre->getExplicitProperty(); 14322 if (!property->isRetaining() && 14323 !(property->getPropertyIvarDecl() && 14324 property->getPropertyIvarDecl()->getType() 14325 .getObjCLifetime() == Qualifiers::OCL_Strong)) 14326 return false; 14327 14328 owner.Indirect = true; 14329 if (pre->isSuperReceiver()) { 14330 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 14331 if (!owner.Variable) 14332 return false; 14333 owner.Loc = pre->getLocation(); 14334 owner.Range = pre->getSourceRange(); 14335 return true; 14336 } 14337 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 14338 ->getSourceExpr()); 14339 continue; 14340 } 14341 14342 // Array ivars? 
14343 14344 return false; 14345 } 14346 } 14347 14348 namespace { 14349 14350 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 14351 ASTContext &Context; 14352 VarDecl *Variable; 14353 Expr *Capturer = nullptr; 14354 bool VarWillBeReased = false; 14355 14356 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 14357 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 14358 Context(Context), Variable(variable) {} 14359 14360 void VisitDeclRefExpr(DeclRefExpr *ref) { 14361 if (ref->getDecl() == Variable && !Capturer) 14362 Capturer = ref; 14363 } 14364 14365 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 14366 if (Capturer) return; 14367 Visit(ref->getBase()); 14368 if (Capturer && ref->isFreeIvar()) 14369 Capturer = ref; 14370 } 14371 14372 void VisitBlockExpr(BlockExpr *block) { 14373 // Look inside nested blocks 14374 if (block->getBlockDecl()->capturesVariable(Variable)) 14375 Visit(block->getBlockDecl()->getBody()); 14376 } 14377 14378 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 14379 if (Capturer) return; 14380 if (OVE->getSourceExpr()) 14381 Visit(OVE->getSourceExpr()); 14382 } 14383 14384 void VisitBinaryOperator(BinaryOperator *BinOp) { 14385 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 14386 return; 14387 Expr *LHS = BinOp->getLHS(); 14388 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 14389 if (DRE->getDecl() != Variable) 14390 return; 14391 if (Expr *RHS = BinOp->getRHS()) { 14392 RHS = RHS->IgnoreParenCasts(); 14393 Optional<llvm::APSInt> Value; 14394 VarWillBeReased = 14395 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 14396 *Value == 0); 14397 } 14398 } 14399 } 14400 }; 14401 14402 } // namespace 14403 14404 /// Check whether the given argument is a block which captures a 14405 /// variable. 14406 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 14407 assert(owner.Variable && owner.Loc.isValid()); 14408 14409 e = e->IgnoreParenCasts(); 14410 14411 // Look through [^{...} copy] and Block_copy(^{...}). 14412 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 14413 Selector Cmd = ME->getSelector(); 14414 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 14415 e = ME->getInstanceReceiver(); 14416 if (!e) 14417 return nullptr; 14418 e = e->IgnoreParenCasts(); 14419 } 14420 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 14421 if (CE->getNumArgs() == 1) { 14422 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 14423 if (Fn) { 14424 const IdentifierInfo *FnI = Fn->getIdentifier(); 14425 if (FnI && FnI->isStr("_Block_copy")) { 14426 e = CE->getArg(0)->IgnoreParenCasts(); 14427 } 14428 } 14429 } 14430 } 14431 14432 BlockExpr *block = dyn_cast<BlockExpr>(e); 14433 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 14434 return nullptr; 14435 14436 FindCaptureVisitor visitor(S.Context, owner.Variable); 14437 visitor.Visit(block->getBlockDecl()->getBody()); 14438 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 14439 } 14440 14441 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 14442 RetainCycleOwner &owner) { 14443 assert(capturer); 14444 assert(owner.Variable && owner.Loc.isValid()); 14445 14446 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 14447 << owner.Variable << capturer->getSourceRange(); 14448 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 14449 << owner.Indirect << owner.Range; 14450 } 14451 14452 /// Check for a keyword selector that starts with the word 'add' or 14453 /// 'set'. 14454 static bool isSetterLikeSelector(Selector sel) { 14455 if (sel.isUnarySelector()) return false; 14456 14457 StringRef str = sel.getNameForSlot(0); 14458 while (!str.empty() && str.front() == '_') str = str.substr(1); 14459 if (str.startswith("set")) 14460 str = str.substr(3); 14461 else if (str.startswith("add")) { 14462 // Specially allow 'addOperationWithBlock:'. 14463 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 14464 return false; 14465 str = str.substr(3); 14466 } 14467 else 14468 return false; 14469 14470 if (str.empty()) return true; 14471 return !isLowercase(str.front()); 14472 } 14473 14474 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 14475 ObjCMessageExpr *Message) { 14476 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 14477 Message->getReceiverInterface(), 14478 NSAPI::ClassId_NSMutableArray); 14479 if (!IsMutableArray) { 14480 return None; 14481 } 14482 14483 Selector Sel = Message->getSelector(); 14484 14485 Optional<NSAPI::NSArrayMethodKind> MKOpt = 14486 S.NSAPIObj->getNSArrayMethodKind(Sel); 14487 if (!MKOpt) { 14488 return None; 14489 } 14490 14491 NSAPI::NSArrayMethodKind MK = *MKOpt; 14492 14493 switch (MK) { 14494 case NSAPI::NSMutableArr_addObject: 14495 case NSAPI::NSMutableArr_insertObjectAtIndex: 14496 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 14497 return 0; 14498 case NSAPI::NSMutableArr_replaceObjectAtIndex: 14499 return 1; 14500 14501 default: 14502 return None; 14503 } 14504 14505 return None; 14506 } 14507 14508 static 14509 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 14510 ObjCMessageExpr *Message) { 14511 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 14512 Message->getReceiverInterface(), 14513 NSAPI::ClassId_NSMutableDictionary); 14514 if (!IsMutableDictionary) { 14515 return None; 14516 } 14517 14518 Selector Sel = Message->getSelector(); 14519 14520 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 14521 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 14522 if (!MKOpt) { 14523 return None; 14524 } 14525 14526 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 14527 14528 switch (MK) { 14529 case NSAPI::NSMutableDict_setObjectForKey: 14530 case NSAPI::NSMutableDict_setValueForKey: 14531 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 14532 return 0; 14533 14534 default: 14535 return None; 14536 } 14537 14538 return None; 14539 } 14540 14541 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 14542 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 14543 Message->getReceiverInterface(), 14544 NSAPI::ClassId_NSMutableSet); 14545 14546 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 14547 Message->getReceiverInterface(), 14548 NSAPI::ClassId_NSMutableOrderedSet); 14549 if (!IsMutableSet && !IsMutableOrderedSet) { 14550 return None; 14551 } 14552 14553 Selector Sel = Message->getSelector(); 14554 14555 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 14556 if (!MKOpt) 
{ 14557 return None; 14558 } 14559 14560 NSAPI::NSSetMethodKind MK = *MKOpt; 14561 14562 switch (MK) { 14563 case NSAPI::NSMutableSet_addObject: 14564 case NSAPI::NSOrderedSet_setObjectAtIndex: 14565 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 14566 case NSAPI::NSOrderedSet_insertObjectAtIndex: 14567 return 0; 14568 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 14569 return 1; 14570 } 14571 14572 return None; 14573 } 14574 14575 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 14576 if (!Message->isInstanceMessage()) { 14577 return; 14578 } 14579 14580 Optional<int> ArgOpt; 14581 14582 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 14583 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 14584 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 14585 return; 14586 } 14587 14588 int ArgIndex = *ArgOpt; 14589 14590 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 14591 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 14592 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 14593 } 14594 14595 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 14596 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14597 if (ArgRE->isObjCSelfExpr()) { 14598 Diag(Message->getSourceRange().getBegin(), 14599 diag::warn_objc_circular_container) 14600 << ArgRE->getDecl() << StringRef("'super'"); 14601 } 14602 } 14603 } else { 14604 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 14605 14606 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 14607 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 14608 } 14609 14610 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 14611 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 14612 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 14613 ValueDecl *Decl = ReceiverRE->getDecl(); 14614 Diag(Message->getSourceRange().getBegin(), 14615 diag::warn_objc_circular_container) 14616 << Decl << Decl; 14617 if (!ArgRE->isObjCSelfExpr()) { 14618 Diag(Decl->getLocation(), 14619 diag::note_objc_circular_container_declared_here) 14620 << Decl; 14621 } 14622 } 14623 } 14624 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 14625 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 14626 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 14627 ObjCIvarDecl *Decl = IvarRE->getDecl(); 14628 Diag(Message->getSourceRange().getBegin(), 14629 diag::warn_objc_circular_container) 14630 << Decl << Decl; 14631 Diag(Decl->getLocation(), 14632 diag::note_objc_circular_container_declared_here) 14633 << Decl; 14634 } 14635 } 14636 } 14637 } 14638 } 14639 14640 /// Check a message send to see if it's likely to cause a retain cycle. 14641 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 14642 // Only check instance methods whose selector looks like a setter. 14643 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 14644 return; 14645 14646 // Try to find a variable that the receiver is strongly owned by. 
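  // For a message to 'super', the owner is the enclosing method's 'self'.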
14647 RetainCycleOwner owner; 14648 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 14649 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 14650 return; 14651 } else { 14652 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 14653 owner.Variable = getCurMethodDecl()->getSelfDecl(); 14654 owner.Loc = msg->getSuperLoc(); 14655 owner.Range = msg->getSuperLoc(); 14656 } 14657 14658 // Check whether the receiver is captured by any of the arguments. 14659 const ObjCMethodDecl *MD = msg->getMethodDecl(); 14660 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 14661 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 14662 // noescape blocks should not be retained by the method. 14663 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 14664 continue; 14665 return diagnoseRetainCycle(*this, capturer, owner); 14666 } 14667 } 14668 } 14669 14670 /// Check a property assign to see if it's likely to cause a retain cycle. 14671 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 14672 RetainCycleOwner owner; 14673 if (!findRetainCycleOwner(*this, receiver, owner)) 14674 return; 14675 14676 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 14677 diagnoseRetainCycle(*this, capturer, owner); 14678 } 14679 14680 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 14681 RetainCycleOwner Owner; 14682 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 14683 return; 14684 14685 // Because we don't have an expression for the variable, we have to set the 14686 // location explicitly here. 14687 Owner.Loc = Var->getLocation(); 14688 Owner.Range = Var->getSourceRange(); 14689 14690 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 14691 diagnoseRetainCycle(*this, Capturer, Owner); 14692 } 14693 14694 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 14695 Expr *RHS, bool isProperty) { 14696 // Check if RHS is an Objective-C object literal, which also can get 14697 // immediately zapped in a weak reference. Note that we explicitly 14698 // allow ObjCStringLiterals, since those are designed to never really die. 14699 RHS = RHS->IgnoreParenImpCasts(); 14700 14701 // This enum needs to match with the 'select' in 14702 // warn_objc_arc_literal_assign (off-by-1). 14703 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 14704 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 14705 return false; 14706 14707 S.Diag(Loc, diag::warn_arc_literal_assign) 14708 << (unsigned) Kind 14709 << (isProperty ? 0 : 1) 14710 << RHS->getSourceRange(); 14711 14712 return true; 14713 } 14714 14715 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 14716 Qualifiers::ObjCLifetime LT, 14717 Expr *RHS, bool isProperty) { 14718 // Strip off any implicit cast added to get to the one ARC-specific. 14719 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14720 if (cast->getCastKind() == CK_ARCConsumeObject) { 14721 S.Diag(Loc, diag::warn_arc_retained_assign) 14722 << (LT == Qualifiers::OCL_ExplicitNone) 14723 << (isProperty ? 
0 : 1) 14724 << RHS->getSourceRange(); 14725 return true; 14726 } 14727 RHS = cast->getSubExpr(); 14728 } 14729 14730 if (LT == Qualifiers::OCL_Weak && 14731 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 14732 return true; 14733 14734 return false; 14735 } 14736 14737 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 14738 QualType LHS, Expr *RHS) { 14739 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 14740 14741 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 14742 return false; 14743 14744 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 14745 return true; 14746 14747 return false; 14748 } 14749 14750 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 14751 Expr *LHS, Expr *RHS) { 14752 QualType LHSType; 14753 // PropertyRef on LHS type need be directly obtained from 14754 // its declaration as it has a PseudoType. 14755 ObjCPropertyRefExpr *PRE 14756 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 14757 if (PRE && !PRE->isImplicitProperty()) { 14758 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14759 if (PD) 14760 LHSType = PD->getType(); 14761 } 14762 14763 if (LHSType.isNull()) 14764 LHSType = LHS->getType(); 14765 14766 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 14767 14768 if (LT == Qualifiers::OCL_Weak) { 14769 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 14770 getCurFunction()->markSafeWeakUse(LHS); 14771 } 14772 14773 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 14774 return; 14775 14776 // FIXME. Check for other life times. 14777 if (LT != Qualifiers::OCL_None) 14778 return; 14779 14780 if (PRE) { 14781 if (PRE->isImplicitProperty()) 14782 return; 14783 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 14784 if (!PD) 14785 return; 14786 14787 unsigned Attributes = PD->getPropertyAttributes(); 14788 if (Attributes & ObjCPropertyAttribute::kind_assign) { 14789 // when 'assign' attribute was not explicitly specified 14790 // by user, ignore it and rely on property type itself 14791 // for lifetime info. 14792 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 14793 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 14794 LHSType->isObjCRetainableType()) 14795 return; 14796 14797 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 14798 if (cast->getCastKind() == CK_ARCConsumeObject) { 14799 Diag(Loc, diag::warn_arc_retained_property_assign) 14800 << RHS->getSourceRange(); 14801 return; 14802 } 14803 RHS = cast->getSubExpr(); 14804 } 14805 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 14806 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 14807 return; 14808 } 14809 } 14810 } 14811 14812 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 14813 14814 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 14815 SourceLocation StmtLoc, 14816 const NullStmt *Body) { 14817 // Do not warn if the body is a macro that expands to nothing, e.g: 14818 // 14819 // #define CALL(x) 14820 // if (condition) 14821 // CALL(0); 14822 if (Body->hasLeadingEmptyMacro()) 14823 return false; 14824 14825 // Get line numbers of statement and body. 
14826 bool StmtLineInvalid; 14827 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 14828 &StmtLineInvalid); 14829 if (StmtLineInvalid) 14830 return false; 14831 14832 bool BodyLineInvalid; 14833 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 14834 &BodyLineInvalid); 14835 if (BodyLineInvalid) 14836 return false; 14837 14838 // Warn if null statement and body are on the same line. 14839 if (StmtLine != BodyLine) 14840 return false; 14841 14842 return true; 14843 } 14844 14845 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 14846 const Stmt *Body, 14847 unsigned DiagID) { 14848 // Since this is a syntactic check, don't emit diagnostic for template 14849 // instantiations, this just adds noise. 14850 if (CurrentInstantiationScope) 14851 return; 14852 14853 // The body should be a null statement. 14854 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14855 if (!NBody) 14856 return; 14857 14858 // Do the usual checks. 14859 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14860 return; 14861 14862 Diag(NBody->getSemiLoc(), DiagID); 14863 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14864 } 14865 14866 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 14867 const Stmt *PossibleBody) { 14868 assert(!CurrentInstantiationScope); // Ensured by caller 14869 14870 SourceLocation StmtLoc; 14871 const Stmt *Body; 14872 unsigned DiagID; 14873 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 14874 StmtLoc = FS->getRParenLoc(); 14875 Body = FS->getBody(); 14876 DiagID = diag::warn_empty_for_body; 14877 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 14878 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 14879 Body = WS->getBody(); 14880 DiagID = diag::warn_empty_while_body; 14881 } else 14882 return; // Neither `for' nor `while'. 14883 14884 // The body should be a null statement. 14885 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 14886 if (!NBody) 14887 return; 14888 14889 // Skip expensive checks if diagnostic is disabled. 14890 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 14891 return; 14892 14893 // Do the usual checks. 14894 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 14895 return; 14896 14897 // `for(...);' and `while(...);' are popular idioms, so in order to keep 14898 // noise level low, emit diagnostics only if for/while is followed by a 14899 // CompoundStmt, e.g.: 14900 // for (int i = 0; i < n; i++); 14901 // { 14902 // a(i); 14903 // } 14904 // or if for/while is followed by a statement with more indentation 14905 // than for/while itself: 14906 // for (int i = 0; i < n; i++); 14907 // a(i); 14908 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 14909 if (!ProbableTypo) { 14910 bool BodyColInvalid; 14911 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 14912 PossibleBody->getBeginLoc(), &BodyColInvalid); 14913 if (BodyColInvalid) 14914 return; 14915 14916 bool StmtColInvalid; 14917 unsigned StmtCol = 14918 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 14919 if (StmtColInvalid) 14920 return; 14921 14922 if (BodyCol > StmtCol) 14923 ProbableTypo = true; 14924 } 14925 14926 if (ProbableTypo) { 14927 Diag(NBody->getSemiLoc(), DiagID); 14928 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 14929 } 14930 } 14931 14932 //===--- CHECK: Warn on self move with std::move. -------------------------===// 14933 14934 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
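/// e.g.:
///   x = std::move(x);  // diagnosed under -Wself-move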
14935 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 14936 SourceLocation OpLoc) { 14937 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 14938 return; 14939 14940 if (inTemplateInstantiation()) 14941 return; 14942 14943 // Strip parens and casts away. 14944 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 14945 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 14946 14947 // Check for a call expression 14948 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 14949 if (!CE || CE->getNumArgs() != 1) 14950 return; 14951 14952 // Check for a call to std::move 14953 if (!CE->isCallToStdMove()) 14954 return; 14955 14956 // Get argument from std::move 14957 RHSExpr = CE->getArg(0); 14958 14959 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 14960 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 14961 14962 // Two DeclRefExpr's, check that the decls are the same. 14963 if (LHSDeclRef && RHSDeclRef) { 14964 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 14965 return; 14966 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 14967 RHSDeclRef->getDecl()->getCanonicalDecl()) 14968 return; 14969 14970 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 14971 << LHSExpr->getSourceRange() 14972 << RHSExpr->getSourceRange(); 14973 return; 14974 } 14975 14976 // Member variables require a different approach to check for self moves. 14977 // MemberExpr's are the same if every nested MemberExpr refers to the same 14978 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 14979 // the base Expr's are CXXThisExpr's. 14980 const Expr *LHSBase = LHSExpr; 14981 const Expr *RHSBase = RHSExpr; 14982 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 14983 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 14984 if (!LHSME || !RHSME) 14985 return; 14986 14987 while (LHSME && RHSME) { 14988 if (LHSME->getMemberDecl()->getCanonicalDecl() != 14989 RHSME->getMemberDecl()->getCanonicalDecl()) 14990 return; 14991 14992 LHSBase = LHSME->getBase(); 14993 RHSBase = RHSME->getBase(); 14994 LHSME = dyn_cast<MemberExpr>(LHSBase); 14995 RHSME = dyn_cast<MemberExpr>(RHSBase); 14996 } 14997 14998 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 14999 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 15000 if (LHSDeclRef && RHSDeclRef) { 15001 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 15002 return; 15003 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 15004 RHSDeclRef->getDecl()->getCanonicalDecl()) 15005 return; 15006 15007 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15008 << LHSExpr->getSourceRange() 15009 << RHSExpr->getSourceRange(); 15010 return; 15011 } 15012 15013 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 15014 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15015 << LHSExpr->getSourceRange() 15016 << RHSExpr->getSourceRange(); 15017 } 15018 15019 //===--- Layout compatibility ----------------------------------------------// 15020 15021 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 15022 15023 /// Check if two enumeration types are layout-compatible. 15024 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 15025 // C++11 [dcl.enum] p8: 15026 // Two enumeration types are layout-compatible if they have the same 15027 // underlying type. 15028 return ED1->isComplete() && ED2->isComplete() && 15029 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 15030 } 15031 15032 /// Check if two fields are layout-compatible. 
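/// The fields must have layout-compatible types and must agree on whether they
/// are bit-fields and, if so, on the bit-field width.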
15033 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 15034 FieldDecl *Field2) { 15035 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 15036 return false; 15037 15038 if (Field1->isBitField() != Field2->isBitField()) 15039 return false; 15040 15041 if (Field1->isBitField()) { 15042 // Make sure that the bit-fields are the same length. 15043 unsigned Bits1 = Field1->getBitWidthValue(C); 15044 unsigned Bits2 = Field2->getBitWidthValue(C); 15045 15046 if (Bits1 != Bits2) 15047 return false; 15048 } 15049 15050 return true; 15051 } 15052 15053 /// Check if two standard-layout structs are layout-compatible. 15054 /// (C++11 [class.mem] p17) 15055 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 15056 RecordDecl *RD2) { 15057 // If both records are C++ classes, check that base classes match. 15058 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 15059 // If one of records is a CXXRecordDecl we are in C++ mode, 15060 // thus the other one is a CXXRecordDecl, too. 15061 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 15062 // Check number of base classes. 15063 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 15064 return false; 15065 15066 // Check the base classes. 15067 for (CXXRecordDecl::base_class_const_iterator 15068 Base1 = D1CXX->bases_begin(), 15069 BaseEnd1 = D1CXX->bases_end(), 15070 Base2 = D2CXX->bases_begin(); 15071 Base1 != BaseEnd1; 15072 ++Base1, ++Base2) { 15073 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 15074 return false; 15075 } 15076 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 15077 // If only RD2 is a C++ class, it should have zero base classes. 15078 if (D2CXX->getNumBases() > 0) 15079 return false; 15080 } 15081 15082 // Check the fields. 15083 RecordDecl::field_iterator Field2 = RD2->field_begin(), 15084 Field2End = RD2->field_end(), 15085 Field1 = RD1->field_begin(), 15086 Field1End = RD1->field_end(); 15087 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 15088 if (!isLayoutCompatible(C, *Field1, *Field2)) 15089 return false; 15090 } 15091 if (Field1 != Field1End || Field2 != Field2End) 15092 return false; 15093 15094 return true; 15095 } 15096 15097 /// Check if two standard-layout unions are layout-compatible. 15098 /// (C++11 [class.mem] p18) 15099 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 15100 RecordDecl *RD2) { 15101 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 15102 for (auto *Field2 : RD2->fields()) 15103 UnmatchedFields.insert(Field2); 15104 15105 for (auto *Field1 : RD1->fields()) { 15106 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 15107 I = UnmatchedFields.begin(), 15108 E = UnmatchedFields.end(); 15109 15110 for ( ; I != E; ++I) { 15111 if (isLayoutCompatible(C, Field1, *I)) { 15112 bool Result = UnmatchedFields.erase(*I); 15113 (void) Result; 15114 assert(Result); 15115 break; 15116 } 15117 } 15118 if (I == E) 15119 return false; 15120 } 15121 15122 return UnmatchedFields.empty(); 15123 } 15124 15125 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 15126 RecordDecl *RD2) { 15127 if (RD1->isUnion() != RD2->isUnion()) 15128 return false; 15129 15130 if (RD1->isUnion()) 15131 return isLayoutCompatibleUnion(C, RD1, RD2); 15132 else 15133 return isLayoutCompatibleStruct(C, RD1, RD2); 15134 } 15135 15136 /// Check if two types are layout-compatible in C++11 sense. 
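/// Beyond exact type identity, only enumeration types and standard-layout
/// class types are considered.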
15137 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
15138   if (T1.isNull() || T2.isNull())
15139     return false;
15140 
15141   // C++11 [basic.types] p11:
15142   //   If two types T1 and T2 are the same type, then T1 and T2 are
15143   //   layout-compatible types.
15144   if (C.hasSameType(T1, T2))
15145     return true;
15146 
15147   T1 = T1.getCanonicalType().getUnqualifiedType();
15148   T2 = T2.getCanonicalType().getUnqualifiedType();
15149 
15150   const Type::TypeClass TC1 = T1->getTypeClass();
15151   const Type::TypeClass TC2 = T2->getTypeClass();
15152 
15153   if (TC1 != TC2)
15154     return false;
15155 
15156   if (TC1 == Type::Enum) {
15157     return isLayoutCompatible(C,
15158                               cast<EnumType>(T1)->getDecl(),
15159                               cast<EnumType>(T2)->getDecl());
15160   } else if (TC1 == Type::Record) {
15161     if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
15162       return false;
15163 
15164     return isLayoutCompatible(C,
15165                               cast<RecordType>(T1)->getDecl(),
15166                               cast<RecordType>(T2)->getDecl());
15167   }
15168 
15169   return false;
15170 }
15171 
15172 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
15173 
15174 /// Given a type tag expression find the type tag itself.
15175 ///
15176 /// \param TypeExpr Type tag expression, as it appears in user's code.
15177 ///
15178 /// \param VD Declaration of an identifier that appears in a type tag.
15179 ///
15180 /// \param MagicValue Type tag magic value.
15181 ///
15182 /// \param isConstantEvaluated whether the evaluation should be performed in
15183 /// constant context.
15184 ///
15185 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
15186                             const ValueDecl **VD, uint64_t *MagicValue,
15187                             bool isConstantEvaluated) {
15188   while(true) {
15189     if (!TypeExpr)
15190       return false;
15191 
15192     TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
15193 
15194     switch (TypeExpr->getStmtClass()) {
15195     case Stmt::UnaryOperatorClass: {
15196       const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr);
15197       if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
15198         TypeExpr = UO->getSubExpr();
15199         continue;
15200       }
15201       return false;
15202     }
15203 
15204     case Stmt::DeclRefExprClass: {
15205       const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr);
15206       *VD = DRE->getDecl();
15207       return true;
15208     }
15209 
15210     case Stmt::IntegerLiteralClass: {
15211       const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr);
15212       llvm::APInt MagicValueAPInt = IL->getValue();
15213       if (MagicValueAPInt.getActiveBits() <= 64) {
15214         *MagicValue = MagicValueAPInt.getZExtValue();
15215         return true;
15216       } else
15217         return false;
15218     }
15219 
15220     case Stmt::BinaryConditionalOperatorClass:
15221     case Stmt::ConditionalOperatorClass: {
15222       const AbstractConditionalOperator *ACO =
15223           cast<AbstractConditionalOperator>(TypeExpr);
15224       bool Result;
15225       if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
15226                                                      isConstantEvaluated)) {
15227         if (Result)
15228           TypeExpr = ACO->getTrueExpr();
15229         else
15230           TypeExpr = ACO->getFalseExpr();
15231         continue;
15232       }
15233       return false;
15234     }
15235 
15236     case Stmt::BinaryOperatorClass: {
15237       const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr);
15238       if (BO->getOpcode() == BO_Comma) {
15239         TypeExpr = BO->getRHS();
15240         continue;
15241       }
15242       return false;
15243     }
15244 
15245     default:
15246       return false;
15247     }
15248   }
15249 }
15250 
15251 /// Retrieve the C type corresponding to type tag TypeExpr.
15252 ///
15253 /// \param TypeExpr Expression that specifies a type tag.
15254 ///
15255 /// \param MagicValues Registered magic values.
15256 ///
15257 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
15258 /// kind.
15259 ///
15260 /// \param TypeInfo Information about the corresponding C type.
15261 ///
15262 /// \param isConstantEvaluated whether the evaluation should be performed in
15263 /// constant context.
15264 ///
15265 /// \returns true if the corresponding C type was found.
15266 static bool GetMatchingCType(
15267     const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
15268     const ASTContext &Ctx,
15269     const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
15270         *MagicValues,
15271     bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
15272     bool isConstantEvaluated) {
15273   FoundWrongKind = false;
15274 
15275   // Variable declaration that has type_tag_for_datatype attribute.
15276   const ValueDecl *VD = nullptr;
15277 
15278   uint64_t MagicValue;
15279 
15280   if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
15281     return false;
15282 
15283   if (VD) {
15284     if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
15285       if (I->getArgumentKind() != ArgumentKind) {
15286         FoundWrongKind = true;
15287         return false;
15288       }
15289       TypeInfo.Type = I->getMatchingCType();
15290       TypeInfo.LayoutCompatible = I->getLayoutCompatible();
15291       TypeInfo.MustBeNull = I->getMustBeNull();
15292       return true;
15293     }
15294     return false;
15295   }
15296 
15297   if (!MagicValues)
15298     return false;
15299 
15300   llvm::DenseMap<Sema::TypeTagMagicValue,
15301                  Sema::TypeTagData>::const_iterator I =
15302       MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
15303   if (I == MagicValues->end())
15304     return false;
15305 
15306   TypeInfo = I->second;
15307   return true;
15308 }
15309 
15310 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
15311                                       uint64_t MagicValue, QualType Type,
15312                                       bool LayoutCompatible,
15313                                       bool MustBeNull) {
15314   if (!TypeTagForDatatypeMagicValues)
15315     TypeTagForDatatypeMagicValues.reset(
15316         new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
15317 
15318   TypeTagMagicValue Magic(ArgumentKind, MagicValue);
15319   (*TypeTagForDatatypeMagicValues)[Magic] =
15320       TypeTagData(Type, LayoutCompatible, MustBeNull);
15321 }
15322 
15323 static bool IsSameCharType(QualType T1, QualType T2) {
15324   const BuiltinType *BT1 = T1->getAs<BuiltinType>();
15325   if (!BT1)
15326     return false;
15327 
15328   const BuiltinType *BT2 = T2->getAs<BuiltinType>();
15329   if (!BT2)
15330     return false;
15331 
15332   BuiltinType::Kind T1Kind = BT1->getKind();
15333   BuiltinType::Kind T2Kind = BT2->getKind();
15334 
15335   return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
15336          (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
15337          (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
15338          (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
15339 }
15340 
15341 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
15342                                     const ArrayRef<const Expr *> ExprArgs,
15343                                     SourceLocation CallSiteLoc) {
15344   const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
15345   bool IsPointerAttr = Attr->getIsPointer();
15346 
15347   // Retrieve the argument representing the 'type_tag'.
15348 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 15349 if (TypeTagIdxAST >= ExprArgs.size()) { 15350 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 15351 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 15352 return; 15353 } 15354 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 15355 bool FoundWrongKind; 15356 TypeTagData TypeInfo; 15357 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 15358 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 15359 TypeInfo, isConstantEvaluated())) { 15360 if (FoundWrongKind) 15361 Diag(TypeTagExpr->getExprLoc(), 15362 diag::warn_type_tag_for_datatype_wrong_kind) 15363 << TypeTagExpr->getSourceRange(); 15364 return; 15365 } 15366 15367 // Retrieve the argument representing the 'arg_idx'. 15368 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 15369 if (ArgumentIdxAST >= ExprArgs.size()) { 15370 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 15371 << 1 << Attr->getArgumentIdx().getSourceIndex(); 15372 return; 15373 } 15374 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 15375 if (IsPointerAttr) { 15376 // Skip implicit cast of pointer to `void *' (as a function argument). 15377 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 15378 if (ICE->getType()->isVoidPointerType() && 15379 ICE->getCastKind() == CK_BitCast) 15380 ArgumentExpr = ICE->getSubExpr(); 15381 } 15382 QualType ArgumentType = ArgumentExpr->getType(); 15383 15384 // Passing a `void*' pointer shouldn't trigger a warning. 15385 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 15386 return; 15387 15388 if (TypeInfo.MustBeNull) { 15389 // Type tag with matching void type requires a null pointer. 15390 if (!ArgumentExpr->isNullPointerConstant(Context, 15391 Expr::NPC_ValueDependentIsNotNull)) { 15392 Diag(ArgumentExpr->getExprLoc(), 15393 diag::warn_type_safety_null_pointer_required) 15394 << ArgumentKind->getName() 15395 << ArgumentExpr->getSourceRange() 15396 << TypeTagExpr->getSourceRange(); 15397 } 15398 return; 15399 } 15400 15401 QualType RequiredType = TypeInfo.Type; 15402 if (IsPointerAttr) 15403 RequiredType = Context.getPointerType(RequiredType); 15404 15405 bool mismatch = false; 15406 if (!TypeInfo.LayoutCompatible) { 15407 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 15408 15409 // C++11 [basic.fundamental] p1: 15410 // Plain char, signed char, and unsigned char are three distinct types. 15411 // 15412 // But we treat plain `char' as equivalent to `signed char' or `unsigned 15413 // char' depending on the current char signedness mode. 
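  // For example (illustrative): with -fsigned-char, a `char *' argument is
  // accepted where the registered type is `signed char *', and vice versa.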
15414 if (mismatch) 15415 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 15416 RequiredType->getPointeeType())) || 15417 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 15418 mismatch = false; 15419 } else 15420 if (IsPointerAttr) 15421 mismatch = !isLayoutCompatible(Context, 15422 ArgumentType->getPointeeType(), 15423 RequiredType->getPointeeType()); 15424 else 15425 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 15426 15427 if (mismatch) 15428 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 15429 << ArgumentType << ArgumentKind 15430 << TypeInfo.LayoutCompatible << RequiredType 15431 << ArgumentExpr->getSourceRange() 15432 << TypeTagExpr->getSourceRange(); 15433 } 15434 15435 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 15436 CharUnits Alignment) { 15437 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 15438 } 15439 15440 void Sema::DiagnoseMisalignedMembers() { 15441 for (MisalignedMember &m : MisalignedMembers) { 15442 const NamedDecl *ND = m.RD; 15443 if (ND->getName().empty()) { 15444 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 15445 ND = TD; 15446 } 15447 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 15448 << m.MD << ND << m.E->getSourceRange(); 15449 } 15450 MisalignedMembers.clear(); 15451 } 15452 15453 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 15454 E = E->IgnoreParens(); 15455 if (!T->isPointerType() && !T->isIntegerType()) 15456 return; 15457 if (isa<UnaryOperator>(E) && 15458 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 15459 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 15460 if (isa<MemberExpr>(Op)) { 15461 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 15462 if (MA != MisalignedMembers.end() && 15463 (T->isIntegerType() || 15464 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 15465 Context.getTypeAlignInChars( 15466 T->getPointeeType()) <= MA->Alignment)))) 15467 MisalignedMembers.erase(MA); 15468 } 15469 } 15470 } 15471 15472 void Sema::RefersToMemberWithReducedAlignment( 15473 Expr *E, 15474 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 15475 Action) { 15476 const auto *ME = dyn_cast<MemberExpr>(E); 15477 if (!ME) 15478 return; 15479 15480 // No need to check expressions with an __unaligned-qualified type. 15481 if (E->getType().getQualifiers().hasUnaligned()) 15482 return; 15483 15484 // For a chain of MemberExpr like "a.b.c.d" this list 15485 // will keep FieldDecl's like [d, c, b]. 15486 SmallVector<FieldDecl *, 4> ReverseMemberChain; 15487 const MemberExpr *TopME = nullptr; 15488 bool AnyIsPacked = false; 15489 do { 15490 QualType BaseType = ME->getBase()->getType(); 15491 if (BaseType->isDependentType()) 15492 return; 15493 if (ME->isArrow()) 15494 BaseType = BaseType->getPointeeType(); 15495 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 15496 if (RD->isInvalidDecl()) 15497 return; 15498 15499 ValueDecl *MD = ME->getMemberDecl(); 15500 auto *FD = dyn_cast<FieldDecl>(MD); 15501 // We do not care about non-data members. 
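    // (For instance, a reference to a static data member or to a member
    // function is not affected by the packing of the enclosing record.)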
15502     if (!FD || FD->isInvalidDecl())
15503       return;
15504
15505     AnyIsPacked =
15506         AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
15507     ReverseMemberChain.push_back(FD);
15508
15509     TopME = ME;
15510     ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
15511   } while (ME);
15512   assert(TopME && "We did not compute a topmost MemberExpr!");
15513
15514   // Not within the scope of this diagnostic.
15515   if (!AnyIsPacked)
15516     return;
15517
15518   const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
15519   const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
15520   // TODO: The innermost base of the member expression may be too complicated.
15521   // For now, just disregard these cases. This is left for future
15522   // improvement.
15523   if (!DRE && !isa<CXXThisExpr>(TopBase))
15524     return;
15525
15526   // Alignment expected by the whole expression.
15527   CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
15528
15529   // No need to do anything else with this case.
15530   if (ExpectedAlignment.isOne())
15531     return;
15532
15533   // Synthesize offset of the whole access.
15534   CharUnits Offset;
15535   for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
15536        I++) {
15537     Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
15538   }
15539
15540   // Compute the CompleteObjectAlignment as the alignment of the whole chain.
15541   CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
15542       ReverseMemberChain.back()->getParent()->getTypeForDecl());
15543
15544   // The base expression of the innermost MemberExpr may give
15545   // stronger guarantees than the class containing the member.
15546   if (DRE && !TopME->isArrow()) {
15547     const ValueDecl *VD = DRE->getDecl();
15548     if (!VD->getType()->isReferenceType())
15549       CompleteObjectAlignment =
15550           std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
15551   }
15552
15553   // Check if the synthesized offset fulfills the alignment.
15554   if (Offset % ExpectedAlignment != 0 ||
15555       // It may fulfill the offset, but the effective alignment may still be
15556       // lower than the expected expression alignment.
15557       CompleteObjectAlignment < ExpectedAlignment) {
15558     // If this happens, we want to determine a sensible culprit for it.
15559     // Intuitively, walking the chain of member expressions from right to
15560     // left, we start with the alignment required by the field type, but some
15561     // packed attribute in that chain has reduced the alignment. It may happen
15562     // that another packed structure increases it again, but if we are here
15563     // such an increase has not been enough. So pointing at the first
15564     // FieldDecl that either is packed itself or whose RecordDecl is packed
15565     // seems reasonable.
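    // Illustrative example (hypothetical declarations):
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { char c; Inner in; } o;
    // For '&o.in.i' the culprit is the FieldDecl 'i': its parent 'Inner' is
    // packed, which reduces the effective alignment of 'i' to one byte.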
15566 FieldDecl *FD = nullptr; 15567 CharUnits Alignment; 15568 for (FieldDecl *FDI : ReverseMemberChain) { 15569 if (FDI->hasAttr<PackedAttr>() || 15570 FDI->getParent()->hasAttr<PackedAttr>()) { 15571 FD = FDI; 15572 Alignment = std::min( 15573 Context.getTypeAlignInChars(FD->getType()), 15574 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 15575 break; 15576 } 15577 } 15578 assert(FD && "We did not find a packed FieldDecl!"); 15579 Action(E, FD->getParent(), FD, Alignment); 15580 } 15581 } 15582 15583 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 15584 using namespace std::placeholders; 15585 15586 RefersToMemberWithReducedAlignment( 15587 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 15588 _2, _3, _4)); 15589 } 15590 15591 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 15592 ExprResult CallResult) { 15593 if (checkArgCount(*this, TheCall, 1)) 15594 return ExprError(); 15595 15596 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 15597 if (MatrixArg.isInvalid()) 15598 return MatrixArg; 15599 Expr *Matrix = MatrixArg.get(); 15600 15601 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 15602 if (!MType) { 15603 Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg); 15604 return ExprError(); 15605 } 15606 15607 // Create returned matrix type by swapping rows and columns of the argument 15608 // matrix type. 15609 QualType ResultType = Context.getConstantMatrixType( 15610 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 15611 15612 // Change the return type to the type of the returned matrix. 15613 TheCall->setType(ResultType); 15614 15615 // Update call argument to use the possibly converted matrix argument. 15616 TheCall->setArg(0, Matrix); 15617 return CallResult; 15618 } 15619 15620 // Get and verify the matrix dimensions. 15621 static llvm::Optional<unsigned> 15622 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 15623 SourceLocation ErrorPos; 15624 Optional<llvm::APSInt> Value = 15625 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 15626 if (!Value) { 15627 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 15628 << Name; 15629 return {}; 15630 } 15631 uint64_t Dim = Value->getZExtValue(); 15632 if (!ConstantMatrixType::isDimensionValid(Dim)) { 15633 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 15634 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 15635 return {}; 15636 } 15637 return Dim; 15638 } 15639 15640 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 15641 ExprResult CallResult) { 15642 if (!getLangOpts().MatrixTypes) { 15643 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 15644 return ExprError(); 15645 } 15646 15647 if (checkArgCount(*this, TheCall, 4)) 15648 return ExprError(); 15649 15650 unsigned PtrArgIdx = 0; 15651 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 15652 Expr *RowsExpr = TheCall->getArg(1); 15653 Expr *ColumnsExpr = TheCall->getArg(2); 15654 Expr *StrideExpr = TheCall->getArg(3); 15655 15656 bool ArgError = false; 15657 15658 // Check pointer argument. 
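  // (For reference, the call being checked has the illustrative form
  //  __builtin_matrix_column_major_load(Ptr, Rows, Columns, Stride), so the
  //  pointer is the first argument.)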
15659   {
15660     ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
15661     if (PtrConv.isInvalid())
15662       return PtrConv;
15663     PtrExpr = PtrConv.get();
15664     TheCall->setArg(0, PtrExpr);
15665     if (PtrExpr->isTypeDependent()) {
15666       TheCall->setType(Context.DependentTy);
15667       return TheCall;
15668     }
15669   }
15670
15671   auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
15672   QualType ElementTy;
15673   if (!PtrTy) {
15674     Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
15675         << PtrArgIdx + 1;
15676     ArgError = true;
15677   } else {
15678     ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
15679
15680     if (!ConstantMatrixType::isValidElementType(ElementTy)) {
15681       Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
15682           << PtrArgIdx + 1;
15683       ArgError = true;
15684     }
15685   }
15686
15687   // Apply default Lvalue conversions and convert the expression to size_t.
15688   auto ApplyArgumentConversions = [this](Expr *E) {
15689     ExprResult Conv = DefaultLvalueConversion(E);
15690     if (Conv.isInvalid())
15691       return Conv;
15692
15693     return tryConvertExprToType(Conv.get(), Context.getSizeType());
15694   };
15695
15696   // Apply conversion to row and column expressions.
15697   ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
15698   if (!RowsConv.isInvalid()) {
15699     RowsExpr = RowsConv.get();
15700     TheCall->setArg(1, RowsExpr);
15701   } else
15702     RowsExpr = nullptr;
15703
15704   ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
15705   if (!ColumnsConv.isInvalid()) {
15706     ColumnsExpr = ColumnsConv.get();
15707     TheCall->setArg(2, ColumnsExpr);
15708   } else
15709     ColumnsExpr = nullptr;
15710
15711   // If any part of the result matrix type is still pending, just use
15712   // Context.DependentTy, until all parts are resolved.
15713   if ((RowsExpr && RowsExpr->isTypeDependent()) ||
15714       (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
15715     TheCall->setType(Context.DependentTy);
15716     return CallResult;
15717   }
15718
15719   // Check row and column dimensions.
15720   llvm::Optional<unsigned> MaybeRows;
15721   if (RowsExpr)
15722     MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
15723
15724   llvm::Optional<unsigned> MaybeColumns;
15725   if (ColumnsExpr)
15726     MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
15727
15728   // Check stride argument.
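  // The stride is measured in elements: each loaded column starts Stride
  // elements after the previous one, so the stride must be at least the
  // number of rows (e.g. a 4x3 load needs a stride of at least 4).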
15729 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 15730 if (StrideConv.isInvalid()) 15731 return ExprError(); 15732 StrideExpr = StrideConv.get(); 15733 TheCall->setArg(3, StrideExpr); 15734 15735 if (MaybeRows) { 15736 if (Optional<llvm::APSInt> Value = 15737 StrideExpr->getIntegerConstantExpr(Context)) { 15738 uint64_t Stride = Value->getZExtValue(); 15739 if (Stride < *MaybeRows) { 15740 Diag(StrideExpr->getBeginLoc(), 15741 diag::err_builtin_matrix_stride_too_small); 15742 ArgError = true; 15743 } 15744 } 15745 } 15746 15747 if (ArgError || !MaybeRows || !MaybeColumns) 15748 return ExprError(); 15749 15750 TheCall->setType( 15751 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 15752 return CallResult; 15753 } 15754 15755 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 15756 ExprResult CallResult) { 15757 if (checkArgCount(*this, TheCall, 3)) 15758 return ExprError(); 15759 15760 unsigned PtrArgIdx = 1; 15761 Expr *MatrixExpr = TheCall->getArg(0); 15762 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 15763 Expr *StrideExpr = TheCall->getArg(2); 15764 15765 bool ArgError = false; 15766 15767 { 15768 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 15769 if (MatrixConv.isInvalid()) 15770 return MatrixConv; 15771 MatrixExpr = MatrixConv.get(); 15772 TheCall->setArg(0, MatrixExpr); 15773 } 15774 if (MatrixExpr->isTypeDependent()) { 15775 TheCall->setType(Context.DependentTy); 15776 return TheCall; 15777 } 15778 15779 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 15780 if (!MatrixTy) { 15781 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0; 15782 ArgError = true; 15783 } 15784 15785 { 15786 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 15787 if (PtrConv.isInvalid()) 15788 return PtrConv; 15789 PtrExpr = PtrConv.get(); 15790 TheCall->setArg(1, PtrExpr); 15791 if (PtrExpr->isTypeDependent()) { 15792 TheCall->setType(Context.DependentTy); 15793 return TheCall; 15794 } 15795 } 15796 15797 // Check pointer argument. 15798 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 15799 if (!PtrTy) { 15800 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 15801 << PtrArgIdx + 1; 15802 ArgError = true; 15803 } else { 15804 QualType ElementTy = PtrTy->getPointeeType(); 15805 if (ElementTy.isConstQualified()) { 15806 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 15807 ArgError = true; 15808 } 15809 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 15810 if (MatrixTy && 15811 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 15812 Diag(PtrExpr->getBeginLoc(), 15813 diag::err_builtin_matrix_pointer_arg_mismatch) 15814 << ElementTy << MatrixTy->getElementType(); 15815 ArgError = true; 15816 } 15817 } 15818 15819 // Apply default Lvalue conversions and convert the stride expression to 15820 // size_t. 15821 { 15822 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 15823 if (StrideConv.isInvalid()) 15824 return StrideConv; 15825 15826 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 15827 if (StrideConv.isInvalid()) 15828 return StrideConv; 15829 StrideExpr = StrideConv.get(); 15830 TheCall->setArg(2, StrideExpr); 15831 } 15832 15833 // Check stride argument. 
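  // As with the column-major load, the stride is measured in elements and
  // must be at least the number of rows of the stored matrix.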
15834 if (MatrixTy) { 15835 if (Optional<llvm::APSInt> Value = 15836 StrideExpr->getIntegerConstantExpr(Context)) { 15837 uint64_t Stride = Value->getZExtValue(); 15838 if (Stride < MatrixTy->getNumRows()) { 15839 Diag(StrideExpr->getBeginLoc(), 15840 diag::err_builtin_matrix_stride_too_small); 15841 ArgError = true; 15842 } 15843 } 15844 } 15845 15846 if (ArgError) 15847 return ExprError(); 15848 15849 return CallResult; 15850 } 15851
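// A minimal usage sketch of the builtin checked above (hypothetical code,
// assuming the matrix extension is enabled with -fenable-matrix):
//   typedef float m4x3_t __attribute__((matrix_type(4, 3)));
//   void store(m4x3_t M, float *Dst) {
//     __builtin_matrix_column_major_store(M, Dst, /*Stride=*/4);
//   }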