//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
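  // Editorial illustration, not from the original source: a minimal sketch of
  // what this check accepts and rejects.
  //   __builtin_annotation(i, "note");   // OK: integer value, narrow literal
  //   __builtin_annotation(i, L"note");  // rejected below: wide string literal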
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
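  // Editorial illustration, not from the original source:
  //   char Buf[32];
  //   __builtin_is_aligned(Buf, 16);  // OK: the array decays to a pointer
  //   void (*Fn)(void);
  //   __builtin_align_up(Fn, 16);     // rejected below: function pointer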
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
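  // Editorial illustration, not from the original source:
  //   int A, B, Res;
  //   __builtin_add_overflow(A, B, &Res);   // OK: pointer to non-const int
  //   const int CRes = 0;
  //   __builtin_add_overflow(A, B, &CRes);  // rejected below: const pointee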
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
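    // Editorial illustration, not from the original source, of the defaults
    // applied below:
    //   printf("%d", 0)   writes "0"        (default precision 1)
    //   printf("%f", 0.0) writes "0.000000" (default precision 6)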
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return llvm::None;
    return NewIndex;
  };

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
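    // Editorial illustration, not from the original source, of the trimming
    // performed below:
    //   "__builtin___memcpy_chk" -> "memcpy"
    //   "__builtin_strcpy"       -> "strcpy"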
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isAscii() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
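    // Editorial illustration, not from the original source, of the suspicious
    // pattern this case flags:
    //   char Dst[8]; char Src[16];
    //   strncpy(Dst, Src, sizeof(Src)); // bound exceeds sizeof(Dst): diagnosed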
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      llvm::APSInt::compareValues(SourceSize.getValue(),
                                  DestinationSize.getValue()) <= 0)
    return;

  StringRef FunctionName = GetFunctionName();

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
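  // Editorial illustration, not from the original source (placeholder names):
  // a block taking two 'local void *' parameters must be followed by two size
  // arguments, e.g.
  //   enqueue_kernel(Q, Flags, NDR,
  //                  ^(local void *A, local void *B){ ... }, 64, 128);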
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected; give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or if its access
/// qualifier is incompatible with the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validate that the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current object format type is in the
// list of unsupported types.
static bool CheckBuiltinTargetNotInUnsupported(
    Sema &S, unsigned BuiltinID, CallExpr *TheCall,
    ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
  llvm::Triple::ObjectFormatType CurObjFormat =
      S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
        << TheCall->getSourceRange();
    return true;
  }
  return false;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                              ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
1619 return false; 1620 case llvm::Triple::arm: 1621 case llvm::Triple::armeb: 1622 case llvm::Triple::thumb: 1623 case llvm::Triple::thumbeb: 1624 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1625 case llvm::Triple::aarch64: 1626 case llvm::Triple::aarch64_32: 1627 case llvm::Triple::aarch64_be: 1628 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1629 case llvm::Triple::bpfeb: 1630 case llvm::Triple::bpfel: 1631 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1632 case llvm::Triple::hexagon: 1633 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1634 case llvm::Triple::mips: 1635 case llvm::Triple::mipsel: 1636 case llvm::Triple::mips64: 1637 case llvm::Triple::mips64el: 1638 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1639 case llvm::Triple::systemz: 1640 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1641 case llvm::Triple::x86: 1642 case llvm::Triple::x86_64: 1643 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1644 case llvm::Triple::ppc: 1645 case llvm::Triple::ppcle: 1646 case llvm::Triple::ppc64: 1647 case llvm::Triple::ppc64le: 1648 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1649 case llvm::Triple::amdgcn: 1650 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1651 case llvm::Triple::riscv32: 1652 case llvm::Triple::riscv64: 1653 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 1654 } 1655 } 1656 1657 ExprResult 1658 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1659 CallExpr *TheCall) { 1660 ExprResult TheCallResult(TheCall); 1661 1662 // Find out if any arguments are required to be integer constant expressions. 1663 unsigned ICEArguments = 0; 1664 ASTContext::GetBuiltinTypeError Error; 1665 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1666 if (Error != ASTContext::GE_None) 1667 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1668 1669 // If any arguments are required to be ICE's, check and diagnose. 1670 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1671 // Skip arguments not required to be ICE's. 1672 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1673 1674 llvm::APSInt Result; 1675 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 1676 return true; 1677 ICEArguments &= ~(1 << ArgNo); 1678 } 1679 1680 switch (BuiltinID) { 1681 case Builtin::BI__builtin___CFStringMakeConstantString: 1682 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 1683 // on z/OS) and for XCOFF (i.e., on AIX). 
Emit unsupported 1684 if (CheckBuiltinTargetNotInUnsupported( 1685 *this, BuiltinID, TheCall, 1686 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 1687 return ExprError(); 1688 assert(TheCall->getNumArgs() == 1 && 1689 "Wrong # arguments to builtin CFStringMakeConstantString"); 1690 if (CheckObjCString(TheCall->getArg(0))) 1691 return ExprError(); 1692 break; 1693 case Builtin::BI__builtin_ms_va_start: 1694 case Builtin::BI__builtin_stdarg_start: 1695 case Builtin::BI__builtin_va_start: 1696 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1697 return ExprError(); 1698 break; 1699 case Builtin::BI__va_start: { 1700 switch (Context.getTargetInfo().getTriple().getArch()) { 1701 case llvm::Triple::aarch64: 1702 case llvm::Triple::arm: 1703 case llvm::Triple::thumb: 1704 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 1705 return ExprError(); 1706 break; 1707 default: 1708 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1709 return ExprError(); 1710 break; 1711 } 1712 break; 1713 } 1714 1715 // The acquire, release, and no fence variants are ARM and AArch64 only. 1716 case Builtin::BI_interlockedbittestandset_acq: 1717 case Builtin::BI_interlockedbittestandset_rel: 1718 case Builtin::BI_interlockedbittestandset_nf: 1719 case Builtin::BI_interlockedbittestandreset_acq: 1720 case Builtin::BI_interlockedbittestandreset_rel: 1721 case Builtin::BI_interlockedbittestandreset_nf: 1722 if (CheckBuiltinTargetInSupported( 1723 *this, BuiltinID, TheCall, 1724 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 1725 return ExprError(); 1726 break; 1727 1728 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 1729 case Builtin::BI_bittest64: 1730 case Builtin::BI_bittestandcomplement64: 1731 case Builtin::BI_bittestandreset64: 1732 case Builtin::BI_bittestandset64: 1733 case Builtin::BI_interlockedbittestandreset64: 1734 case Builtin::BI_interlockedbittestandset64: 1735 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 1736 {llvm::Triple::x86_64, llvm::Triple::arm, 1737 llvm::Triple::thumb, 1738 llvm::Triple::aarch64})) 1739 return ExprError(); 1740 break; 1741 1742 case Builtin::BI__builtin_isgreater: 1743 case Builtin::BI__builtin_isgreaterequal: 1744 case Builtin::BI__builtin_isless: 1745 case Builtin::BI__builtin_islessequal: 1746 case Builtin::BI__builtin_islessgreater: 1747 case Builtin::BI__builtin_isunordered: 1748 if (SemaBuiltinUnorderedCompare(TheCall)) 1749 return ExprError(); 1750 break; 1751 case Builtin::BI__builtin_fpclassify: 1752 if (SemaBuiltinFPClassification(TheCall, 6)) 1753 return ExprError(); 1754 break; 1755 case Builtin::BI__builtin_isfinite: 1756 case Builtin::BI__builtin_isinf: 1757 case Builtin::BI__builtin_isinf_sign: 1758 case Builtin::BI__builtin_isnan: 1759 case Builtin::BI__builtin_isnormal: 1760 case Builtin::BI__builtin_signbit: 1761 case Builtin::BI__builtin_signbitf: 1762 case Builtin::BI__builtin_signbitl: 1763 if (SemaBuiltinFPClassification(TheCall, 1)) 1764 return ExprError(); 1765 break; 1766 case Builtin::BI__builtin_shufflevector: 1767 return SemaBuiltinShuffleVector(TheCall); 1768 // TheCall will be freed by the smart pointer here, but that's fine, since 1769 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
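  // For illustration, SemaBuiltinPrefetch below accepts one to three arguments
  // and requires the optional ones to be integer constant expressions in
  // range: [0, 1] for the read/write hint and [0, 3] for locality, e.g.
  //
  //   __builtin_prefetch(ptr);                            // ok, defaults
  //   __builtin_prefetch(ptr, /*rw=*/1, /*locality=*/3);  // ok
  //   __builtin_prefetch(ptr, 2);                         // rejected: rw > 1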
1770 case Builtin::BI__builtin_prefetch: 1771 if (SemaBuiltinPrefetch(TheCall)) 1772 return ExprError(); 1773 break; 1774 case Builtin::BI__builtin_alloca_with_align: 1775 case Builtin::BI__builtin_alloca_with_align_uninitialized: 1776 if (SemaBuiltinAllocaWithAlign(TheCall)) 1777 return ExprError(); 1778 LLVM_FALLTHROUGH; 1779 case Builtin::BI__builtin_alloca: 1780 case Builtin::BI__builtin_alloca_uninitialized: 1781 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1782 << TheCall->getDirectCallee(); 1783 break; 1784 case Builtin::BI__arithmetic_fence: 1785 if (SemaBuiltinArithmeticFence(TheCall)) 1786 return ExprError(); 1787 break; 1788 case Builtin::BI__assume: 1789 case Builtin::BI__builtin_assume: 1790 if (SemaBuiltinAssume(TheCall)) 1791 return ExprError(); 1792 break; 1793 case Builtin::BI__builtin_assume_aligned: 1794 if (SemaBuiltinAssumeAligned(TheCall)) 1795 return ExprError(); 1796 break; 1797 case Builtin::BI__builtin_dynamic_object_size: 1798 case Builtin::BI__builtin_object_size: 1799 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1800 return ExprError(); 1801 break; 1802 case Builtin::BI__builtin_longjmp: 1803 if (SemaBuiltinLongjmp(TheCall)) 1804 return ExprError(); 1805 break; 1806 case Builtin::BI__builtin_setjmp: 1807 if (SemaBuiltinSetjmp(TheCall)) 1808 return ExprError(); 1809 break; 1810 case Builtin::BI__builtin_classify_type: 1811 if (checkArgCount(*this, TheCall, 1)) return true; 1812 TheCall->setType(Context.IntTy); 1813 break; 1814 case Builtin::BI__builtin_complex: 1815 if (SemaBuiltinComplex(TheCall)) 1816 return ExprError(); 1817 break; 1818 case Builtin::BI__builtin_constant_p: { 1819 if (checkArgCount(*this, TheCall, 1)) return true; 1820 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1821 if (Arg.isInvalid()) return true; 1822 TheCall->setArg(0, Arg.get()); 1823 TheCall->setType(Context.IntTy); 1824 break; 1825 } 1826 case Builtin::BI__builtin_launder: 1827 return SemaBuiltinLaunder(*this, TheCall); 1828 case Builtin::BI__sync_fetch_and_add: 1829 case Builtin::BI__sync_fetch_and_add_1: 1830 case Builtin::BI__sync_fetch_and_add_2: 1831 case Builtin::BI__sync_fetch_and_add_4: 1832 case Builtin::BI__sync_fetch_and_add_8: 1833 case Builtin::BI__sync_fetch_and_add_16: 1834 case Builtin::BI__sync_fetch_and_sub: 1835 case Builtin::BI__sync_fetch_and_sub_1: 1836 case Builtin::BI__sync_fetch_and_sub_2: 1837 case Builtin::BI__sync_fetch_and_sub_4: 1838 case Builtin::BI__sync_fetch_and_sub_8: 1839 case Builtin::BI__sync_fetch_and_sub_16: 1840 case Builtin::BI__sync_fetch_and_or: 1841 case Builtin::BI__sync_fetch_and_or_1: 1842 case Builtin::BI__sync_fetch_and_or_2: 1843 case Builtin::BI__sync_fetch_and_or_4: 1844 case Builtin::BI__sync_fetch_and_or_8: 1845 case Builtin::BI__sync_fetch_and_or_16: 1846 case Builtin::BI__sync_fetch_and_and: 1847 case Builtin::BI__sync_fetch_and_and_1: 1848 case Builtin::BI__sync_fetch_and_and_2: 1849 case Builtin::BI__sync_fetch_and_and_4: 1850 case Builtin::BI__sync_fetch_and_and_8: 1851 case Builtin::BI__sync_fetch_and_and_16: 1852 case Builtin::BI__sync_fetch_and_xor: 1853 case Builtin::BI__sync_fetch_and_xor_1: 1854 case Builtin::BI__sync_fetch_and_xor_2: 1855 case Builtin::BI__sync_fetch_and_xor_4: 1856 case Builtin::BI__sync_fetch_and_xor_8: 1857 case Builtin::BI__sync_fetch_and_xor_16: 1858 case Builtin::BI__sync_fetch_and_nand: 1859 case Builtin::BI__sync_fetch_and_nand_1: 1860 case Builtin::BI__sync_fetch_and_nand_2: 1861 case Builtin::BI__sync_fetch_and_nand_4: 1862 case 
Builtin::BI__sync_fetch_and_nand_8: 1863 case Builtin::BI__sync_fetch_and_nand_16: 1864 case Builtin::BI__sync_add_and_fetch: 1865 case Builtin::BI__sync_add_and_fetch_1: 1866 case Builtin::BI__sync_add_and_fetch_2: 1867 case Builtin::BI__sync_add_and_fetch_4: 1868 case Builtin::BI__sync_add_and_fetch_8: 1869 case Builtin::BI__sync_add_and_fetch_16: 1870 case Builtin::BI__sync_sub_and_fetch: 1871 case Builtin::BI__sync_sub_and_fetch_1: 1872 case Builtin::BI__sync_sub_and_fetch_2: 1873 case Builtin::BI__sync_sub_and_fetch_4: 1874 case Builtin::BI__sync_sub_and_fetch_8: 1875 case Builtin::BI__sync_sub_and_fetch_16: 1876 case Builtin::BI__sync_and_and_fetch: 1877 case Builtin::BI__sync_and_and_fetch_1: 1878 case Builtin::BI__sync_and_and_fetch_2: 1879 case Builtin::BI__sync_and_and_fetch_4: 1880 case Builtin::BI__sync_and_and_fetch_8: 1881 case Builtin::BI__sync_and_and_fetch_16: 1882 case Builtin::BI__sync_or_and_fetch: 1883 case Builtin::BI__sync_or_and_fetch_1: 1884 case Builtin::BI__sync_or_and_fetch_2: 1885 case Builtin::BI__sync_or_and_fetch_4: 1886 case Builtin::BI__sync_or_and_fetch_8: 1887 case Builtin::BI__sync_or_and_fetch_16: 1888 case Builtin::BI__sync_xor_and_fetch: 1889 case Builtin::BI__sync_xor_and_fetch_1: 1890 case Builtin::BI__sync_xor_and_fetch_2: 1891 case Builtin::BI__sync_xor_and_fetch_4: 1892 case Builtin::BI__sync_xor_and_fetch_8: 1893 case Builtin::BI__sync_xor_and_fetch_16: 1894 case Builtin::BI__sync_nand_and_fetch: 1895 case Builtin::BI__sync_nand_and_fetch_1: 1896 case Builtin::BI__sync_nand_and_fetch_2: 1897 case Builtin::BI__sync_nand_and_fetch_4: 1898 case Builtin::BI__sync_nand_and_fetch_8: 1899 case Builtin::BI__sync_nand_and_fetch_16: 1900 case Builtin::BI__sync_val_compare_and_swap: 1901 case Builtin::BI__sync_val_compare_and_swap_1: 1902 case Builtin::BI__sync_val_compare_and_swap_2: 1903 case Builtin::BI__sync_val_compare_and_swap_4: 1904 case Builtin::BI__sync_val_compare_and_swap_8: 1905 case Builtin::BI__sync_val_compare_and_swap_16: 1906 case Builtin::BI__sync_bool_compare_and_swap: 1907 case Builtin::BI__sync_bool_compare_and_swap_1: 1908 case Builtin::BI__sync_bool_compare_and_swap_2: 1909 case Builtin::BI__sync_bool_compare_and_swap_4: 1910 case Builtin::BI__sync_bool_compare_and_swap_8: 1911 case Builtin::BI__sync_bool_compare_and_swap_16: 1912 case Builtin::BI__sync_lock_test_and_set: 1913 case Builtin::BI__sync_lock_test_and_set_1: 1914 case Builtin::BI__sync_lock_test_and_set_2: 1915 case Builtin::BI__sync_lock_test_and_set_4: 1916 case Builtin::BI__sync_lock_test_and_set_8: 1917 case Builtin::BI__sync_lock_test_and_set_16: 1918 case Builtin::BI__sync_lock_release: 1919 case Builtin::BI__sync_lock_release_1: 1920 case Builtin::BI__sync_lock_release_2: 1921 case Builtin::BI__sync_lock_release_4: 1922 case Builtin::BI__sync_lock_release_8: 1923 case Builtin::BI__sync_lock_release_16: 1924 case Builtin::BI__sync_swap: 1925 case Builtin::BI__sync_swap_1: 1926 case Builtin::BI__sync_swap_2: 1927 case Builtin::BI__sync_swap_4: 1928 case Builtin::BI__sync_swap_8: 1929 case Builtin::BI__sync_swap_16: 1930 return SemaBuiltinAtomicOverloaded(TheCallResult); 1931 case Builtin::BI__sync_synchronize: 1932 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1933 << TheCall->getCallee()->getSourceRange(); 1934 break; 1935 case Builtin::BI__builtin_nontemporal_load: 1936 case Builtin::BI__builtin_nontemporal_store: 1937 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1938 case Builtin::BI__builtin_memcpy_inline: { 1939 clang::Expr 
*SizeOp = TheCall->getArg(2); 1940 // We warn about copying to or from `nullptr` pointers when `size` is 1941 // greater than 0. When `size` is value dependent we cannot evaluate its 1942 // value so we bail out. 1943 if (SizeOp->isValueDependent()) 1944 break; 1945 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 1946 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1947 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1948 } 1949 break; 1950 } 1951 #define BUILTIN(ID, TYPE, ATTRS) 1952 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1953 case Builtin::BI##ID: \ 1954 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1955 #include "clang/Basic/Builtins.def" 1956 case Builtin::BI__annotation: 1957 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1958 return ExprError(); 1959 break; 1960 case Builtin::BI__builtin_annotation: 1961 if (SemaBuiltinAnnotation(*this, TheCall)) 1962 return ExprError(); 1963 break; 1964 case Builtin::BI__builtin_addressof: 1965 if (SemaBuiltinAddressof(*this, TheCall)) 1966 return ExprError(); 1967 break; 1968 case Builtin::BI__builtin_function_start: 1969 if (SemaBuiltinFunctionStart(*this, TheCall)) 1970 return ExprError(); 1971 break; 1972 case Builtin::BI__builtin_is_aligned: 1973 case Builtin::BI__builtin_align_up: 1974 case Builtin::BI__builtin_align_down: 1975 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1976 return ExprError(); 1977 break; 1978 case Builtin::BI__builtin_add_overflow: 1979 case Builtin::BI__builtin_sub_overflow: 1980 case Builtin::BI__builtin_mul_overflow: 1981 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1982 return ExprError(); 1983 break; 1984 case Builtin::BI__builtin_operator_new: 1985 case Builtin::BI__builtin_operator_delete: { 1986 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1987 ExprResult Res = 1988 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1989 if (Res.isInvalid()) 1990 CorrectDelayedTyposInExpr(TheCallResult.get()); 1991 return Res; 1992 } 1993 case Builtin::BI__builtin_dump_struct: { 1994 // We first want to ensure we are called with 2 arguments 1995 if (checkArgCount(*this, TheCall, 2)) 1996 return ExprError(); 1997 // Ensure that the first argument is of type 'struct XX *' 1998 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1999 const QualType PtrArgType = PtrArg->getType(); 2000 if (!PtrArgType->isPointerType() || 2001 !PtrArgType->getPointeeType()->isRecordType()) { 2002 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2003 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 2004 << "structure pointer"; 2005 return ExprError(); 2006 } 2007 2008 // Ensure that the second argument is of type 'FunctionType' 2009 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 2010 const QualType FnPtrArgType = FnPtrArg->getType(); 2011 if (!FnPtrArgType->isPointerType()) { 2012 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2013 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2014 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2015 return ExprError(); 2016 } 2017 2018 const auto *FuncType = 2019 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 2020 2021 if (!FuncType) { 2022 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2023 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2024 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2025 return ExprError(); 
2026 } 2027 2028 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 2029 if (!FT->getNumParams()) { 2030 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2031 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2032 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2033 return ExprError(); 2034 } 2035 QualType PT = FT->getParamType(0); 2036 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 2037 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 2038 !PT->getPointeeType().isConstQualified()) { 2039 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2040 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2041 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2042 return ExprError(); 2043 } 2044 } 2045 2046 TheCall->setType(Context.IntTy); 2047 break; 2048 } 2049 case Builtin::BI__builtin_expect_with_probability: { 2050 // We first want to ensure we are called with 3 arguments 2051 if (checkArgCount(*this, TheCall, 3)) 2052 return ExprError(); 2053 // then check probability is constant float in range [0.0, 1.0] 2054 const Expr *ProbArg = TheCall->getArg(2); 2055 SmallVector<PartialDiagnosticAt, 8> Notes; 2056 Expr::EvalResult Eval; 2057 Eval.Diag = &Notes; 2058 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2059 !Eval.Val.isFloat()) { 2060 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2061 << ProbArg->getSourceRange(); 2062 for (const PartialDiagnosticAt &PDiag : Notes) 2063 Diag(PDiag.first, PDiag.second); 2064 return ExprError(); 2065 } 2066 llvm::APFloat Probability = Eval.Val.getFloat(); 2067 bool LoseInfo = false; 2068 Probability.convert(llvm::APFloat::IEEEdouble(), 2069 llvm::RoundingMode::Dynamic, &LoseInfo); 2070 if (!(Probability >= llvm::APFloat(0.0) && 2071 Probability <= llvm::APFloat(1.0))) { 2072 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2073 << ProbArg->getSourceRange(); 2074 return ExprError(); 2075 } 2076 break; 2077 } 2078 case Builtin::BI__builtin_preserve_access_index: 2079 if (SemaBuiltinPreserveAI(*this, TheCall)) 2080 return ExprError(); 2081 break; 2082 case Builtin::BI__builtin_call_with_static_chain: 2083 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2084 return ExprError(); 2085 break; 2086 case Builtin::BI__exception_code: 2087 case Builtin::BI_exception_code: 2088 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2089 diag::err_seh___except_block)) 2090 return ExprError(); 2091 break; 2092 case Builtin::BI__exception_info: 2093 case Builtin::BI_exception_info: 2094 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2095 diag::err_seh___except_filter)) 2096 return ExprError(); 2097 break; 2098 case Builtin::BI__GetExceptionInfo: 2099 if (checkArgCount(*this, TheCall, 1)) 2100 return ExprError(); 2101 2102 if (CheckCXXThrowOperand( 2103 TheCall->getBeginLoc(), 2104 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2105 TheCall)) 2106 return ExprError(); 2107 2108 TheCall->setType(Context.VoidPtrTy); 2109 break; 2110 // OpenCL v2.0, s6.13.16 - Pipe functions 2111 case Builtin::BIread_pipe: 2112 case Builtin::BIwrite_pipe: 2113 // Since those two functions are declared with var args, we need a semantic 2114 // check for the argument. 
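  // For illustration, given a kernel parameter `read_only pipe int p`, valid
  // forms end with a pointer to the packet type (here `int *`):
  //
  //   int val;
  //   read_pipe(p, &val);                // 2-argument form
  //   read_pipe(p, rid, index, &val);    // 4-argument form with a reserve_id_t
  //
  // while a mismatched packet pointer or wrong access qualifier is diagnosed.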
2115 if (SemaBuiltinRWPipe(*this, TheCall)) 2116 return ExprError(); 2117 break; 2118 case Builtin::BIreserve_read_pipe: 2119 case Builtin::BIreserve_write_pipe: 2120 case Builtin::BIwork_group_reserve_read_pipe: 2121 case Builtin::BIwork_group_reserve_write_pipe: 2122 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2123 return ExprError(); 2124 break; 2125 case Builtin::BIsub_group_reserve_read_pipe: 2126 case Builtin::BIsub_group_reserve_write_pipe: 2127 if (checkOpenCLSubgroupExt(*this, TheCall) || 2128 SemaBuiltinReserveRWPipe(*this, TheCall)) 2129 return ExprError(); 2130 break; 2131 case Builtin::BIcommit_read_pipe: 2132 case Builtin::BIcommit_write_pipe: 2133 case Builtin::BIwork_group_commit_read_pipe: 2134 case Builtin::BIwork_group_commit_write_pipe: 2135 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2136 return ExprError(); 2137 break; 2138 case Builtin::BIsub_group_commit_read_pipe: 2139 case Builtin::BIsub_group_commit_write_pipe: 2140 if (checkOpenCLSubgroupExt(*this, TheCall) || 2141 SemaBuiltinCommitRWPipe(*this, TheCall)) 2142 return ExprError(); 2143 break; 2144 case Builtin::BIget_pipe_num_packets: 2145 case Builtin::BIget_pipe_max_packets: 2146 if (SemaBuiltinPipePackets(*this, TheCall)) 2147 return ExprError(); 2148 break; 2149 case Builtin::BIto_global: 2150 case Builtin::BIto_local: 2151 case Builtin::BIto_private: 2152 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2153 return ExprError(); 2154 break; 2155 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2156 case Builtin::BIenqueue_kernel: 2157 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2158 return ExprError(); 2159 break; 2160 case Builtin::BIget_kernel_work_group_size: 2161 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2162 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2163 return ExprError(); 2164 break; 2165 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2166 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2167 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2168 return ExprError(); 2169 break; 2170 case Builtin::BI__builtin_os_log_format: 2171 Cleanup.setExprNeedsCleanups(true); 2172 LLVM_FALLTHROUGH; 2173 case Builtin::BI__builtin_os_log_format_buffer_size: 2174 if (SemaBuiltinOSLogFormat(TheCall)) 2175 return ExprError(); 2176 break; 2177 case Builtin::BI__builtin_frame_address: 2178 case Builtin::BI__builtin_return_address: { 2179 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2180 return ExprError(); 2181 2182 // -Wframe-address warning if non-zero passed to builtin 2183 // return/frame address. 2184 Expr::EvalResult Result; 2185 if (!TheCall->getArg(0)->isValueDependent() && 2186 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2187 Result.Val.getInt() != 0) 2188 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2189 << ((BuiltinID == Builtin::BI__builtin_return_address) 2190 ? "__builtin_return_address" 2191 : "__builtin_frame_address") 2192 << TheCall->getSourceRange(); 2193 break; 2194 } 2195 2196 // __builtin_elementwise_abs restricts the element type to signed integers or 2197 // floating point types only. 
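  // For illustration, with `typedef int int4 __attribute__((ext_vector_type(4)))`
  // and a suitably typed value `v`:
  //
  //   int4  r = __builtin_elementwise_abs(v);      // ok: signed elements
  //   float f = __builtin_elementwise_abs(-3.0f);  // ok: scalars also allowed
  //
  // whereas an unsigned element type (e.g. a vector of `unsigned int`) is
  // rejected by the check in this case.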
2198 case Builtin::BI__builtin_elementwise_abs: { 2199 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2200 return ExprError(); 2201 2202 QualType ArgTy = TheCall->getArg(0)->getType(); 2203 QualType EltTy = ArgTy; 2204 2205 if (auto *VecTy = EltTy->getAs<VectorType>()) 2206 EltTy = VecTy->getElementType(); 2207 if (EltTy->isUnsignedIntegerType()) { 2208 Diag(TheCall->getArg(0)->getBeginLoc(), 2209 diag::err_builtin_invalid_arg_type) 2210 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2211 return ExprError(); 2212 } 2213 break; 2214 } 2215 2216 // These builtins restrict the element type to floating point 2217 // types only. 2218 case Builtin::BI__builtin_elementwise_ceil: 2219 case Builtin::BI__builtin_elementwise_floor: 2220 case Builtin::BI__builtin_elementwise_roundeven: 2221 case Builtin::BI__builtin_elementwise_trunc: { 2222 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2223 return ExprError(); 2224 2225 QualType ArgTy = TheCall->getArg(0)->getType(); 2226 QualType EltTy = ArgTy; 2227 2228 if (auto *VecTy = EltTy->getAs<VectorType>()) 2229 EltTy = VecTy->getElementType(); 2230 if (!EltTy->isFloatingType()) { 2231 Diag(TheCall->getArg(0)->getBeginLoc(), 2232 diag::err_builtin_invalid_arg_type) 2233 << 1 << /* float ty*/ 5 << ArgTy; 2234 2235 return ExprError(); 2236 } 2237 break; 2238 } 2239 2240 case Builtin::BI__builtin_elementwise_min: 2241 case Builtin::BI__builtin_elementwise_max: 2242 if (SemaBuiltinElementwiseMath(TheCall)) 2243 return ExprError(); 2244 break; 2245 case Builtin::BI__builtin_reduce_max: 2246 case Builtin::BI__builtin_reduce_min: { 2247 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2248 return ExprError(); 2249 2250 const Expr *Arg = TheCall->getArg(0); 2251 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2252 if (!TyA) { 2253 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2254 << 1 << /* vector ty*/ 4 << Arg->getType(); 2255 return ExprError(); 2256 } 2257 2258 TheCall->setType(TyA->getElementType()); 2259 break; 2260 } 2261 2262 // These builtins support vectors of integers only. 
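  // For illustration, with `typedef int int4 __attribute__((ext_vector_type(4)))`
  // and an `int4 v`:
  //
  //   int r = __builtin_reduce_and(v);   // ok: result has the element type
  //   __builtin_reduce_xor(1.0f);        // rejected: not a vector of integers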
2263 case Builtin::BI__builtin_reduce_xor: 2264 case Builtin::BI__builtin_reduce_or: 2265 case Builtin::BI__builtin_reduce_and: { 2266 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2267 return ExprError(); 2268 2269 const Expr *Arg = TheCall->getArg(0); 2270 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2271 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2272 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2273 << 1 << /* vector of integers */ 6 << Arg->getType(); 2274 return ExprError(); 2275 } 2276 TheCall->setType(TyA->getElementType()); 2277 break; 2278 } 2279 2280 case Builtin::BI__builtin_matrix_transpose: 2281 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2282 2283 case Builtin::BI__builtin_matrix_column_major_load: 2284 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2285 2286 case Builtin::BI__builtin_matrix_column_major_store: 2287 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2288 2289 case Builtin::BI__builtin_get_device_side_mangled_name: { 2290 auto Check = [](CallExpr *TheCall) { 2291 if (TheCall->getNumArgs() != 1) 2292 return false; 2293 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2294 if (!DRE) 2295 return false; 2296 auto *D = DRE->getDecl(); 2297 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2298 return false; 2299 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2300 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2301 }; 2302 if (!Check(TheCall)) { 2303 Diag(TheCall->getBeginLoc(), 2304 diag::err_hip_invalid_args_builtin_mangled_name); 2305 return ExprError(); 2306 } 2307 } 2308 } 2309 2310 // Since the target specific builtins for each arch overlap, only check those 2311 // of the arch we are compiling for. 2312 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2313 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2314 assert(Context.getAuxTargetInfo() && 2315 "Aux Target Builtin, but not an aux target?"); 2316 2317 if (CheckTSBuiltinFunctionCall( 2318 *Context.getAuxTargetInfo(), 2319 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2320 return ExprError(); 2321 } else { 2322 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2323 TheCall)) 2324 return ExprError(); 2325 } 2326 } 2327 2328 return TheCallResult; 2329 } 2330 2331 // Get the valid immediate range for the specified NEON type code. 2332 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2333 NeonTypeFlags Type(t); 2334 int IsQuad = ForceQuad ? true : Type.isQuad(); 2335 switch (Type.getEltType()) { 2336 case NeonTypeFlags::Int8: 2337 case NeonTypeFlags::Poly8: 2338 return shift ? 7 : (8 << IsQuad) - 1; 2339 case NeonTypeFlags::Int16: 2340 case NeonTypeFlags::Poly16: 2341 return shift ? 15 : (4 << IsQuad) - 1; 2342 case NeonTypeFlags::Int32: 2343 return shift ? 31 : (2 << IsQuad) - 1; 2344 case NeonTypeFlags::Int64: 2345 case NeonTypeFlags::Poly64: 2346 return shift ? 63 : (1 << IsQuad) - 1; 2347 case NeonTypeFlags::Poly128: 2348 return shift ? 
127 : (1 << IsQuad) - 1; 2349 case NeonTypeFlags::Float16: 2350 assert(!shift && "cannot shift float types!"); 2351 return (4 << IsQuad) - 1; 2352 case NeonTypeFlags::Float32: 2353 assert(!shift && "cannot shift float types!"); 2354 return (2 << IsQuad) - 1; 2355 case NeonTypeFlags::Float64: 2356 assert(!shift && "cannot shift float types!"); 2357 return (1 << IsQuad) - 1; 2358 case NeonTypeFlags::BFloat16: 2359 assert(!shift && "cannot shift float types!"); 2360 return (4 << IsQuad) - 1; 2361 } 2362 llvm_unreachable("Invalid NeonTypeFlag!"); 2363 } 2364 2365 /// getNeonEltType - Return the QualType corresponding to the elements of 2366 /// the vector type specified by the NeonTypeFlags. This is used to check 2367 /// the pointer arguments for Neon load/store intrinsics. 2368 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2369 bool IsPolyUnsigned, bool IsInt64Long) { 2370 switch (Flags.getEltType()) { 2371 case NeonTypeFlags::Int8: 2372 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2373 case NeonTypeFlags::Int16: 2374 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2375 case NeonTypeFlags::Int32: 2376 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2377 case NeonTypeFlags::Int64: 2378 if (IsInt64Long) 2379 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2380 else 2381 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2382 : Context.LongLongTy; 2383 case NeonTypeFlags::Poly8: 2384 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2385 case NeonTypeFlags::Poly16: 2386 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2387 case NeonTypeFlags::Poly64: 2388 if (IsInt64Long) 2389 return Context.UnsignedLongTy; 2390 else 2391 return Context.UnsignedLongLongTy; 2392 case NeonTypeFlags::Poly128: 2393 break; 2394 case NeonTypeFlags::Float16: 2395 return Context.HalfTy; 2396 case NeonTypeFlags::Float32: 2397 return Context.FloatTy; 2398 case NeonTypeFlags::Float64: 2399 return Context.DoubleTy; 2400 case NeonTypeFlags::BFloat16: 2401 return Context.BFloat16Ty; 2402 } 2403 llvm_unreachable("Invalid NeonTypeFlag!"); 2404 } 2405 2406 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2407 // Range check SVE intrinsics that take immediate values. 2408 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2409 2410 switch (BuiltinID) { 2411 default: 2412 return false; 2413 #define GET_SVE_IMMEDIATE_CHECK 2414 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2415 #undef GET_SVE_IMMEDIATE_CHECK 2416 } 2417 2418 // Perform all the immediate checks for this builtin call. 2419 bool HasError = false; 2420 for (auto &I : ImmChecks) { 2421 int ArgNum, CheckTy, ElementSizeInBits; 2422 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2423 2424 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2425 2426 // Function that checks whether the operand (ArgNum) is an immediate 2427 // that is one of the predefined values. 2428 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2429 int ErrDiag) -> bool { 2430 // We can't check the value of a dependent argument. 2431 Expr *Arg = TheCall->getArg(ArgNum); 2432 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2433 return false; 2434 2435 // Check constant-ness first. 
2436 llvm::APSInt Imm; 2437 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2438 return true; 2439 2440 if (!CheckImm(Imm.getSExtValue())) 2441 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2442 return false; 2443 }; 2444 2445 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2446 case SVETypeFlags::ImmCheck0_31: 2447 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2448 HasError = true; 2449 break; 2450 case SVETypeFlags::ImmCheck0_13: 2451 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2452 HasError = true; 2453 break; 2454 case SVETypeFlags::ImmCheck1_16: 2455 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2456 HasError = true; 2457 break; 2458 case SVETypeFlags::ImmCheck0_7: 2459 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2460 HasError = true; 2461 break; 2462 case SVETypeFlags::ImmCheckExtract: 2463 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2464 (2048 / ElementSizeInBits) - 1)) 2465 HasError = true; 2466 break; 2467 case SVETypeFlags::ImmCheckShiftRight: 2468 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2469 HasError = true; 2470 break; 2471 case SVETypeFlags::ImmCheckShiftRightNarrow: 2472 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2473 ElementSizeInBits / 2)) 2474 HasError = true; 2475 break; 2476 case SVETypeFlags::ImmCheckShiftLeft: 2477 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2478 ElementSizeInBits - 1)) 2479 HasError = true; 2480 break; 2481 case SVETypeFlags::ImmCheckLaneIndex: 2482 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2483 (128 / (1 * ElementSizeInBits)) - 1)) 2484 HasError = true; 2485 break; 2486 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2487 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2488 (128 / (2 * ElementSizeInBits)) - 1)) 2489 HasError = true; 2490 break; 2491 case SVETypeFlags::ImmCheckLaneIndexDot: 2492 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2493 (128 / (4 * ElementSizeInBits)) - 1)) 2494 HasError = true; 2495 break; 2496 case SVETypeFlags::ImmCheckComplexRot90_270: 2497 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2498 diag::err_rotation_argument_to_cadd)) 2499 HasError = true; 2500 break; 2501 case SVETypeFlags::ImmCheckComplexRotAll90: 2502 if (CheckImmediateInSet( 2503 [](int64_t V) { 2504 return V == 0 || V == 90 || V == 180 || V == 270; 2505 }, 2506 diag::err_rotation_argument_to_cmla)) 2507 HasError = true; 2508 break; 2509 case SVETypeFlags::ImmCheck0_1: 2510 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2511 HasError = true; 2512 break; 2513 case SVETypeFlags::ImmCheck0_2: 2514 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2515 HasError = true; 2516 break; 2517 case SVETypeFlags::ImmCheck0_3: 2518 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2519 HasError = true; 2520 break; 2521 } 2522 } 2523 2524 return HasError; 2525 } 2526 2527 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2528 unsigned BuiltinID, CallExpr *TheCall) { 2529 llvm::APSInt Result; 2530 uint64_t mask = 0; 2531 unsigned TV = 0; 2532 int PtrArgNum = -1; 2533 bool HasConstPtr = false; 2534 switch (BuiltinID) { 2535 #define GET_NEON_OVERLOAD_CHECK 2536 #include "clang/Basic/arm_neon.inc" 2537 #include "clang/Basic/arm_fp16.inc" 2538 #undef GET_NEON_OVERLOAD_CHECK 2539 } 2540 2541 // For NEON intrinsics which are overloaded on vector element type, validate 2542 // the immediate which specifies which variant to emit. 
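  // (Illustrative: arm_neon.h passes this type code as a trailing literal when
  //  it expands an overloaded intrinsic, e.g. the uint8x8_t variant of vld1
  //  becomes roughly
  //
  //    __builtin_neon_vld1_v(__p0, 16);  // 16 ~ NeonTypeFlags Int8 | Unsigned
  //
  //  a code whose bit is not set in 'mask' is rejected just below.)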
2543 unsigned ImmArg = TheCall->getNumArgs()-1; 2544 if (mask) { 2545 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2546 return true; 2547 2548 TV = Result.getLimitedValue(64); 2549 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2550 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2551 << TheCall->getArg(ImmArg)->getSourceRange(); 2552 } 2553 2554 if (PtrArgNum >= 0) { 2555 // Check that pointer arguments have the specified type. 2556 Expr *Arg = TheCall->getArg(PtrArgNum); 2557 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2558 Arg = ICE->getSubExpr(); 2559 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2560 QualType RHSTy = RHS.get()->getType(); 2561 2562 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2563 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2564 Arch == llvm::Triple::aarch64_32 || 2565 Arch == llvm::Triple::aarch64_be; 2566 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2567 QualType EltTy = 2568 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2569 if (HasConstPtr) 2570 EltTy = EltTy.withConst(); 2571 QualType LHSTy = Context.getPointerType(EltTy); 2572 AssignConvertType ConvTy; 2573 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2574 if (RHS.isInvalid()) 2575 return true; 2576 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2577 RHS.get(), AA_Assigning)) 2578 return true; 2579 } 2580 2581 // For NEON intrinsics which take an immediate value as part of the 2582 // instruction, range check them here. 2583 unsigned i = 0, l = 0, u = 0; 2584 switch (BuiltinID) { 2585 default: 2586 return false; 2587 #define GET_NEON_IMMEDIATE_CHECK 2588 #include "clang/Basic/arm_neon.inc" 2589 #include "clang/Basic/arm_fp16.inc" 2590 #undef GET_NEON_IMMEDIATE_CHECK 2591 } 2592 2593 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2594 } 2595 2596 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2597 switch (BuiltinID) { 2598 default: 2599 return false; 2600 #include "clang/Basic/arm_mve_builtin_sema.inc" 2601 } 2602 } 2603 2604 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2605 CallExpr *TheCall) { 2606 bool Err = false; 2607 switch (BuiltinID) { 2608 default: 2609 return false; 2610 #include "clang/Basic/arm_cde_builtin_sema.inc" 2611 } 2612 2613 if (Err) 2614 return true; 2615 2616 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2617 } 2618 2619 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2620 const Expr *CoprocArg, bool WantCDE) { 2621 if (isConstantEvaluated()) 2622 return false; 2623 2624 // We can't check the value of a dependent argument. 
2625 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2626 return false; 2627 2628 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2629 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2630 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2631 2632 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2633 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2634 2635 if (IsCDECoproc != WantCDE) 2636 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2637 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2638 2639 return false; 2640 } 2641 2642 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2643 unsigned MaxWidth) { 2644 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2645 BuiltinID == ARM::BI__builtin_arm_ldaex || 2646 BuiltinID == ARM::BI__builtin_arm_strex || 2647 BuiltinID == ARM::BI__builtin_arm_stlex || 2648 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2649 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2650 BuiltinID == AArch64::BI__builtin_arm_strex || 2651 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2652 "unexpected ARM builtin"); 2653 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2654 BuiltinID == ARM::BI__builtin_arm_ldaex || 2655 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2656 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2657 2658 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2659 2660 // Ensure that we have the proper number of arguments. 2661 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2662 return true; 2663 2664 // Inspect the pointer argument of the atomic builtin. This should always be 2665 // a pointer type, whose element is an integral scalar or pointer type. 2666 // Because it is a pointer type, we don't have to worry about any implicit 2667 // casts here. 2668 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 2669 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2670 if (PointerArgRes.isInvalid()) 2671 return true; 2672 PointerArg = PointerArgRes.get(); 2673 2674 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2675 if (!pointerType) { 2676 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2677 << PointerArg->getType() << PointerArg->getSourceRange(); 2678 return true; 2679 } 2680 2681 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2682 // task is to insert the appropriate casts into the AST. First work out just 2683 // what the appropriate type is. 2684 QualType ValType = pointerType->getPointeeType(); 2685 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2686 if (IsLdrex) 2687 AddrType.addConst(); 2688 2689 // Issue a warning if the cast is dodgy. 2690 CastKind CastNeeded = CK_NoOp; 2691 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2692 CastNeeded = CK_BitCast; 2693 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2694 << PointerArg->getType() << Context.getPointerType(AddrType) 2695 << AA_Passing << PointerArg->getSourceRange(); 2696 } 2697 2698 // Finally, do the cast and replace the argument with the corrected version. 2699 AddrType = Context.getPointerType(AddrType); 2700 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2701 if (PointerArgRes.isInvalid()) 2702 return true; 2703 PointerArg = PointerArgRes.get(); 2704 2705 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 2706 2707 // In general, we allow ints, floats and pointers to be loaded and stored. 2708 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2709 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2710 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2711 << PointerArg->getType() << PointerArg->getSourceRange(); 2712 return true; 2713 } 2714 2715 // But ARM doesn't have instructions to deal with 128-bit versions. 2716 if (Context.getTypeSize(ValType) > MaxWidth) { 2717 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2718 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2719 << PointerArg->getType() << PointerArg->getSourceRange(); 2720 return true; 2721 } 2722 2723 switch (ValType.getObjCLifetime()) { 2724 case Qualifiers::OCL_None: 2725 case Qualifiers::OCL_ExplicitNone: 2726 // okay 2727 break; 2728 2729 case Qualifiers::OCL_Weak: 2730 case Qualifiers::OCL_Strong: 2731 case Qualifiers::OCL_Autoreleasing: 2732 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2733 << ValType << PointerArg->getSourceRange(); 2734 return true; 2735 } 2736 2737 if (IsLdrex) { 2738 TheCall->setType(ValType); 2739 return false; 2740 } 2741 2742 // Initialize the argument to be stored. 2743 ExprResult ValArg = TheCall->getArg(0); 2744 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2745 Context, ValType, /*consume*/ false); 2746 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2747 if (ValArg.isInvalid()) 2748 return true; 2749 TheCall->setArg(0, ValArg.get()); 2750 2751 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2752 // but the custom checker bypasses all default analysis. 2753 TheCall->setType(Context.IntTy); 2754 return false; 2755 } 2756 2757 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2758 CallExpr *TheCall) { 2759 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2760 BuiltinID == ARM::BI__builtin_arm_ldaex || 2761 BuiltinID == ARM::BI__builtin_arm_strex || 2762 BuiltinID == ARM::BI__builtin_arm_stlex) { 2763 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2764 } 2765 2766 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2767 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2768 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2769 } 2770 2771 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2772 BuiltinID == ARM::BI__builtin_arm_wsr64) 2773 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2774 2775 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2776 BuiltinID == ARM::BI__builtin_arm_rsrp || 2777 BuiltinID == ARM::BI__builtin_arm_wsr || 2778 BuiltinID == ARM::BI__builtin_arm_wsrp) 2779 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2780 2781 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2782 return true; 2783 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2784 return true; 2785 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2786 return true; 2787 2788 // For intrinsics which take an immediate value as part of the instruction, 2789 // range check them here. 2790 // FIXME: VFP Intrinsics should error if VFP not present. 
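  // For illustration, the saturating builtins encode the saturation width as
  // an immediate operand, matching the ranges checked in the switch below:
  //
  //   int      s = __builtin_arm_ssat(x, 8);   // ok: width in [1, 32]
  //   unsigned u = __builtin_arm_usat(x, 15);  // ok: width in [0, 31]
  //   __builtin_arm_ssat(x, 0);                // rejected: out of range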
2791 switch (BuiltinID) { 2792 default: return false; 2793 case ARM::BI__builtin_arm_ssat: 2794 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2795 case ARM::BI__builtin_arm_usat: 2796 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2797 case ARM::BI__builtin_arm_ssat16: 2798 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2799 case ARM::BI__builtin_arm_usat16: 2800 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2801 case ARM::BI__builtin_arm_vcvtr_f: 2802 case ARM::BI__builtin_arm_vcvtr_d: 2803 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2804 case ARM::BI__builtin_arm_dmb: 2805 case ARM::BI__builtin_arm_dsb: 2806 case ARM::BI__builtin_arm_isb: 2807 case ARM::BI__builtin_arm_dbg: 2808 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2809 case ARM::BI__builtin_arm_cdp: 2810 case ARM::BI__builtin_arm_cdp2: 2811 case ARM::BI__builtin_arm_mcr: 2812 case ARM::BI__builtin_arm_mcr2: 2813 case ARM::BI__builtin_arm_mrc: 2814 case ARM::BI__builtin_arm_mrc2: 2815 case ARM::BI__builtin_arm_mcrr: 2816 case ARM::BI__builtin_arm_mcrr2: 2817 case ARM::BI__builtin_arm_mrrc: 2818 case ARM::BI__builtin_arm_mrrc2: 2819 case ARM::BI__builtin_arm_ldc: 2820 case ARM::BI__builtin_arm_ldcl: 2821 case ARM::BI__builtin_arm_ldc2: 2822 case ARM::BI__builtin_arm_ldc2l: 2823 case ARM::BI__builtin_arm_stc: 2824 case ARM::BI__builtin_arm_stcl: 2825 case ARM::BI__builtin_arm_stc2: 2826 case ARM::BI__builtin_arm_stc2l: 2827 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2828 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2829 /*WantCDE*/ false); 2830 } 2831 } 2832 2833 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2834 unsigned BuiltinID, 2835 CallExpr *TheCall) { 2836 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2837 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2838 BuiltinID == AArch64::BI__builtin_arm_strex || 2839 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2840 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2841 } 2842 2843 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2844 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2845 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2846 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2847 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2848 } 2849 2850 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2851 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2852 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2853 2854 // Memory Tagging Extensions (MTE) Intrinsics 2855 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2856 BuiltinID == AArch64::BI__builtin_arm_addg || 2857 BuiltinID == AArch64::BI__builtin_arm_gmi || 2858 BuiltinID == AArch64::BI__builtin_arm_ldg || 2859 BuiltinID == AArch64::BI__builtin_arm_stg || 2860 BuiltinID == AArch64::BI__builtin_arm_subp) { 2861 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2862 } 2863 2864 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2865 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2866 BuiltinID == AArch64::BI__builtin_arm_wsr || 2867 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2868 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2869 2870 // Only check the valid encoding range. Any constant in this range would be 2871 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2872 // an exception for incorrect registers. This matches MSVC behavior. 
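  // (Illustrative, assuming MSVC's ARM64_SYSREG(op0, op1, crn, crm, op2)
  //  packing from winnt.h: reading TPIDR_EL0, i.e. S3_3_C13_C0_2, would be
  //
  //    __int64 v = _ReadStatusReg(ARM64_SYSREG(3, 3, 13, 0, 2));
  //
  //  and only the 0..0x7fff encoding range is validated here.)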
2873 if (BuiltinID == AArch64::BI_ReadStatusReg || 2874 BuiltinID == AArch64::BI_WriteStatusReg) 2875 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2876 2877 if (BuiltinID == AArch64::BI__getReg) 2878 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2879 2880 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2881 return true; 2882 2883 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2884 return true; 2885 2886 // For intrinsics which take an immediate value as part of the instruction, 2887 // range check them here. 2888 unsigned i = 0, l = 0, u = 0; 2889 switch (BuiltinID) { 2890 default: return false; 2891 case AArch64::BI__builtin_arm_dmb: 2892 case AArch64::BI__builtin_arm_dsb: 2893 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2894 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2895 } 2896 2897 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2898 } 2899 2900 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2901 if (Arg->getType()->getAsPlaceholderType()) 2902 return false; 2903 2904 // The first argument needs to be a record field access. 2905 // If it is an array element access, we delay decision 2906 // to BPF backend to check whether the access is a 2907 // field access or not. 2908 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2909 isa<MemberExpr>(Arg->IgnoreParens()) || 2910 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 2911 } 2912 2913 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2914 QualType VectorTy, QualType EltTy) { 2915 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2916 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2917 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2918 << Call->getSourceRange() << VectorEltTy << EltTy; 2919 return false; 2920 } 2921 return true; 2922 } 2923 2924 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2925 QualType ArgType = Arg->getType(); 2926 if (ArgType->getAsPlaceholderType()) 2927 return false; 2928 2929 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2930 // format: 2931 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2932 // 2. <type> var; 2933 // __builtin_preserve_type_info(var, flag); 2934 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 2935 !isa<UnaryOperator>(Arg->IgnoreParens())) 2936 return false; 2937 2938 // Typedef type. 2939 if (ArgType->getAs<TypedefType>()) 2940 return true; 2941 2942 // Record type or Enum type. 
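  // (Illustrative: a named definition such as `struct task { int pid; } t;`
  //  used as `__builtin_preserve_type_info(t, flag)` is accepted below, while
  //  an unnamed record or enum is not.)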
2943 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2944 if (const auto *RT = Ty->getAs<RecordType>()) { 2945 if (!RT->getDecl()->getDeclName().isEmpty()) 2946 return true; 2947 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2948 if (!ET->getDecl()->getDeclName().isEmpty()) 2949 return true; 2950 } 2951 2952 return false; 2953 } 2954 2955 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2956 QualType ArgType = Arg->getType(); 2957 if (ArgType->getAsPlaceholderType()) 2958 return false; 2959 2960 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2961 // format: 2962 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2963 // flag); 2964 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2965 if (!UO) 2966 return false; 2967 2968 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2969 if (!CE) 2970 return false; 2971 if (CE->getCastKind() != CK_IntegralToPointer && 2972 CE->getCastKind() != CK_NullToPointer) 2973 return false; 2974 2975 // The integer must be from an EnumConstantDecl. 2976 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2977 if (!DR) 2978 return false; 2979 2980 const EnumConstantDecl *Enumerator = 2981 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2982 if (!Enumerator) 2983 return false; 2984 2985 // The type must be EnumType. 2986 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2987 const auto *ET = Ty->getAs<EnumType>(); 2988 if (!ET) 2989 return false; 2990 2991 // The enum value must be supported. 2992 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 2993 } 2994 2995 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2996 CallExpr *TheCall) { 2997 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2998 BuiltinID == BPF::BI__builtin_btf_type_id || 2999 BuiltinID == BPF::BI__builtin_preserve_type_info || 3000 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3001 "unexpected BPF builtin"); 3002 3003 if (checkArgCount(*this, TheCall, 2)) 3004 return true; 3005 3006 // The second argument needs to be a constant int 3007 Expr *Arg = TheCall->getArg(1); 3008 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3009 diag::kind kind; 3010 if (!Value) { 3011 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3012 kind = diag::err_preserve_field_info_not_const; 3013 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3014 kind = diag::err_btf_type_id_not_const; 3015 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3016 kind = diag::err_preserve_type_info_not_const; 3017 else 3018 kind = diag::err_preserve_enum_value_not_const; 3019 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3020 return true; 3021 } 3022 3023 // The first argument 3024 Arg = TheCall->getArg(0); 3025 bool InvalidArg = false; 3026 bool ReturnUnsignedInt = true; 3027 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3028 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3029 InvalidArg = true; 3030 kind = diag::err_preserve_field_info_not_field; 3031 } 3032 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3033 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3034 InvalidArg = true; 3035 kind = diag::err_preserve_type_info_invalid; 3036 } 3037 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3038 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3039 InvalidArg = true; 3040 kind = diag::err_preserve_enum_value_invalid; 3041 } 3042 ReturnUnsignedInt = false; 3043 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3044 ReturnUnsignedInt 
= false; 3045 } 3046 3047 if (InvalidArg) { 3048 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3049 return true; 3050 } 3051 3052 if (ReturnUnsignedInt) 3053 TheCall->setType(Context.UnsignedIntTy); 3054 else 3055 TheCall->setType(Context.UnsignedLongTy); 3056 return false; 3057 } 3058 3059 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3060 struct ArgInfo { 3061 uint8_t OpNum; 3062 bool IsSigned; 3063 uint8_t BitWidth; 3064 uint8_t Align; 3065 }; 3066 struct BuiltinInfo { 3067 unsigned BuiltinID; 3068 ArgInfo Infos[2]; 3069 }; 3070 3071 static BuiltinInfo Infos[] = { 3072 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3073 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3074 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3075 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3076 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3077 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3078 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3079 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3080 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3081 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3082 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3083 3084 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3085 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3086 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3087 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3088 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3089 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3090 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3091 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3092 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3093 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3094 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3095 3096 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3097 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3098 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3099 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3100 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3101 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3102 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3103 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3104 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3105 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3106 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3107 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3108 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3109 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3110 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3111 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3112 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3113 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3114 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3115 { 
Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3116 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3117 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3118 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3119 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3120 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3121 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3122 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3123 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3124 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3125 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3126 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3127 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3128 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3129 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3130 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3131 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3132 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3133 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3134 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3135 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3136 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3137 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3138 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3139 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3140 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3141 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3142 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3143 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3144 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3145 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3146 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3147 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3148 {{ 1, false, 6, 0 }} }, 3149 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3150 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3151 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3152 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3153 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3154 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3155 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3156 {{ 1, false, 5, 0 }} }, 3157 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3158 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3159 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3160 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3161 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3162 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3163 { 2, false, 5, 0 }} }, 3164 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 
3165 { 2, false, 6, 0 }} }, 3166 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3167 { 3, false, 5, 0 }} }, 3168 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3169 { 3, false, 6, 0 }} }, 3170 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3171 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3172 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3173 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3174 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3175 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3176 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3177 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3178 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3179 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3180 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3181 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3182 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3183 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3184 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3185 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3186 {{ 2, false, 4, 0 }, 3187 { 3, false, 5, 0 }} }, 3188 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3189 {{ 2, false, 4, 0 }, 3190 { 3, false, 5, 0 }} }, 3191 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3192 {{ 2, false, 4, 0 }, 3193 { 3, false, 5, 0 }} }, 3194 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3195 {{ 2, false, 4, 0 }, 3196 { 3, false, 5, 0 }} }, 3197 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3198 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3199 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3200 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3201 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3202 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3203 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3204 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3205 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3206 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3207 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3208 { 2, false, 5, 0 }} }, 3209 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3210 { 2, false, 6, 0 }} }, 3211 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3212 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3213 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3214 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3215 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3216 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3217 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3218 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3219 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3220 {{ 1, false, 4, 0 }} }, 3221 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3222 { 
Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3223 {{ 1, false, 4, 0 }} }, 3224 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3225 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3226 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3227 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3228 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3229 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3230 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3231 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3232 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3233 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3234 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3235 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3236 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3237 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3238 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3239 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3240 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3241 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3242 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3243 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3244 {{ 3, false, 1, 0 }} }, 3245 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3246 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3247 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3248 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3249 {{ 3, false, 1, 0 }} }, 3250 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3251 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3252 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3253 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3254 {{ 3, false, 1, 0 }} }, 3255 }; 3256 3257 // Use a dynamically initialized static to sort the table exactly once on 3258 // first run. 3259 static const bool SortOnce = 3260 (llvm::sort(Infos, 3261 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3262 return LHS.BuiltinID < RHS.BuiltinID; 3263 }), 3264 true); 3265 (void)SortOnce; 3266 3267 const BuiltinInfo *F = llvm::partition_point( 3268 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3269 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3270 return false; 3271 3272 bool Error = false; 3273 3274 for (const ArgInfo &A : F->Infos) { 3275 // Ignore empty ArgInfo elements. 3276 if (A.BitWidth == 0) 3277 continue; 3278 3279 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3280 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3281 if (!A.Align) { 3282 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3283 } else { 3284 unsigned M = 1 << A.Align; 3285 Min *= M; 3286 Max *= M; 3287 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3288 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3289 } 3290 } 3291 return Error; 3292 } 3293 3294 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3295 CallExpr *TheCall) { 3296 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3297 } 3298 3299 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3300 unsigned BuiltinID, CallExpr *TheCall) { 3301 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3302 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3303 } 3304 3305 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3306 CallExpr *TheCall) { 3307 3308 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3309 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3310 if (!TI.hasFeature("dsp")) 3311 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3312 } 3313 3314 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3315 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3316 if (!TI.hasFeature("dspr2")) 3317 return Diag(TheCall->getBeginLoc(), 3318 diag::err_mips_builtin_requires_dspr2); 3319 } 3320 3321 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3322 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3323 if (!TI.hasFeature("msa")) 3324 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3325 } 3326 3327 return false; 3328 } 3329 3330 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3331 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3332 // ordering for DSP is unspecified. MSA is ordered by the data format used 3333 // by the underlying instruction i.e., df/m, df/n and then by size. 3334 // 3335 // FIXME: The size tests here should instead be tablegen'd along with the 3336 // definitions from include/clang/Basic/BuiltinsMips.def. 3337 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3338 // be too. 3339 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3340 unsigned i = 0, l = 0, u = 0, m = 0; 3341 switch (BuiltinID) { 3342 default: return false; 3343 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3344 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3345 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3346 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3347 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3348 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3349 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3350 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3351 // df/m field. 3352 // These intrinsics take an unsigned 3 bit immediate. 
3353 case Mips::BI__builtin_msa_bclri_b: 3354 case Mips::BI__builtin_msa_bnegi_b: 3355 case Mips::BI__builtin_msa_bseti_b: 3356 case Mips::BI__builtin_msa_sat_s_b: 3357 case Mips::BI__builtin_msa_sat_u_b: 3358 case Mips::BI__builtin_msa_slli_b: 3359 case Mips::BI__builtin_msa_srai_b: 3360 case Mips::BI__builtin_msa_srari_b: 3361 case Mips::BI__builtin_msa_srli_b: 3362 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3363 case Mips::BI__builtin_msa_binsli_b: 3364 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3365 // These intrinsics take an unsigned 4 bit immediate. 3366 case Mips::BI__builtin_msa_bclri_h: 3367 case Mips::BI__builtin_msa_bnegi_h: 3368 case Mips::BI__builtin_msa_bseti_h: 3369 case Mips::BI__builtin_msa_sat_s_h: 3370 case Mips::BI__builtin_msa_sat_u_h: 3371 case Mips::BI__builtin_msa_slli_h: 3372 case Mips::BI__builtin_msa_srai_h: 3373 case Mips::BI__builtin_msa_srari_h: 3374 case Mips::BI__builtin_msa_srli_h: 3375 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3376 case Mips::BI__builtin_msa_binsli_h: 3377 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3378 // These intrinsics take an unsigned 5 bit immediate. 3379 // The first block of intrinsics actually have an unsigned 5 bit field, 3380 // not a df/n field. 3381 case Mips::BI__builtin_msa_cfcmsa: 3382 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3383 case Mips::BI__builtin_msa_clei_u_b: 3384 case Mips::BI__builtin_msa_clei_u_h: 3385 case Mips::BI__builtin_msa_clei_u_w: 3386 case Mips::BI__builtin_msa_clei_u_d: 3387 case Mips::BI__builtin_msa_clti_u_b: 3388 case Mips::BI__builtin_msa_clti_u_h: 3389 case Mips::BI__builtin_msa_clti_u_w: 3390 case Mips::BI__builtin_msa_clti_u_d: 3391 case Mips::BI__builtin_msa_maxi_u_b: 3392 case Mips::BI__builtin_msa_maxi_u_h: 3393 case Mips::BI__builtin_msa_maxi_u_w: 3394 case Mips::BI__builtin_msa_maxi_u_d: 3395 case Mips::BI__builtin_msa_mini_u_b: 3396 case Mips::BI__builtin_msa_mini_u_h: 3397 case Mips::BI__builtin_msa_mini_u_w: 3398 case Mips::BI__builtin_msa_mini_u_d: 3399 case Mips::BI__builtin_msa_addvi_b: 3400 case Mips::BI__builtin_msa_addvi_h: 3401 case Mips::BI__builtin_msa_addvi_w: 3402 case Mips::BI__builtin_msa_addvi_d: 3403 case Mips::BI__builtin_msa_bclri_w: 3404 case Mips::BI__builtin_msa_bnegi_w: 3405 case Mips::BI__builtin_msa_bseti_w: 3406 case Mips::BI__builtin_msa_sat_s_w: 3407 case Mips::BI__builtin_msa_sat_u_w: 3408 case Mips::BI__builtin_msa_slli_w: 3409 case Mips::BI__builtin_msa_srai_w: 3410 case Mips::BI__builtin_msa_srari_w: 3411 case Mips::BI__builtin_msa_srli_w: 3412 case Mips::BI__builtin_msa_srlri_w: 3413 case Mips::BI__builtin_msa_subvi_b: 3414 case Mips::BI__builtin_msa_subvi_h: 3415 case Mips::BI__builtin_msa_subvi_w: 3416 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3417 case Mips::BI__builtin_msa_binsli_w: 3418 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3419 // These intrinsics take an unsigned 6 bit immediate. 
3420 case Mips::BI__builtin_msa_bclri_d: 3421 case Mips::BI__builtin_msa_bnegi_d: 3422 case Mips::BI__builtin_msa_bseti_d: 3423 case Mips::BI__builtin_msa_sat_s_d: 3424 case Mips::BI__builtin_msa_sat_u_d: 3425 case Mips::BI__builtin_msa_slli_d: 3426 case Mips::BI__builtin_msa_srai_d: 3427 case Mips::BI__builtin_msa_srari_d: 3428 case Mips::BI__builtin_msa_srli_d: 3429 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3430 case Mips::BI__builtin_msa_binsli_d: 3431 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3432 // These intrinsics take a signed 5 bit immediate. 3433 case Mips::BI__builtin_msa_ceqi_b: 3434 case Mips::BI__builtin_msa_ceqi_h: 3435 case Mips::BI__builtin_msa_ceqi_w: 3436 case Mips::BI__builtin_msa_ceqi_d: 3437 case Mips::BI__builtin_msa_clti_s_b: 3438 case Mips::BI__builtin_msa_clti_s_h: 3439 case Mips::BI__builtin_msa_clti_s_w: 3440 case Mips::BI__builtin_msa_clti_s_d: 3441 case Mips::BI__builtin_msa_clei_s_b: 3442 case Mips::BI__builtin_msa_clei_s_h: 3443 case Mips::BI__builtin_msa_clei_s_w: 3444 case Mips::BI__builtin_msa_clei_s_d: 3445 case Mips::BI__builtin_msa_maxi_s_b: 3446 case Mips::BI__builtin_msa_maxi_s_h: 3447 case Mips::BI__builtin_msa_maxi_s_w: 3448 case Mips::BI__builtin_msa_maxi_s_d: 3449 case Mips::BI__builtin_msa_mini_s_b: 3450 case Mips::BI__builtin_msa_mini_s_h: 3451 case Mips::BI__builtin_msa_mini_s_w: 3452 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3453 // These intrinsics take an unsigned 8 bit immediate. 3454 case Mips::BI__builtin_msa_andi_b: 3455 case Mips::BI__builtin_msa_nori_b: 3456 case Mips::BI__builtin_msa_ori_b: 3457 case Mips::BI__builtin_msa_shf_b: 3458 case Mips::BI__builtin_msa_shf_h: 3459 case Mips::BI__builtin_msa_shf_w: 3460 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3461 case Mips::BI__builtin_msa_bseli_b: 3462 case Mips::BI__builtin_msa_bmnzi_b: 3463 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3464 // df/n format 3465 // These intrinsics take an unsigned 4 bit immediate. 3466 case Mips::BI__builtin_msa_copy_s_b: 3467 case Mips::BI__builtin_msa_copy_u_b: 3468 case Mips::BI__builtin_msa_insve_b: 3469 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3470 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3471 // These intrinsics take an unsigned 3 bit immediate. 3472 case Mips::BI__builtin_msa_copy_s_h: 3473 case Mips::BI__builtin_msa_copy_u_h: 3474 case Mips::BI__builtin_msa_insve_h: 3475 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3476 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3477 // These intrinsics take an unsigned 2 bit immediate. 3478 case Mips::BI__builtin_msa_copy_s_w: 3479 case Mips::BI__builtin_msa_copy_u_w: 3480 case Mips::BI__builtin_msa_insve_w: 3481 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3482 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3483 // These intrinsics take an unsigned 1 bit immediate. 3484 case Mips::BI__builtin_msa_copy_s_d: 3485 case Mips::BI__builtin_msa_copy_u_d: 3486 case Mips::BI__builtin_msa_insve_d: 3487 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3488 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3489 // Memory offsets and immediate loads. 3490 // These intrinsics take a signed 10 bit immediate. 
3491 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3492 case Mips::BI__builtin_msa_ldi_h: 3493 case Mips::BI__builtin_msa_ldi_w: 3494 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3495 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3496 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3497 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3498 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3499 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3500 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3501 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3502 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3503 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3504 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3505 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3506 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3507 } 3508 3509 if (!m) 3510 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3511 3512 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3513 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3514 } 3515 3516 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3517 /// advancing the pointer over the consumed characters. The decoded type is 3518 /// returned. If the decoded type represents a constant integer with a 3519 /// constraint on its value then Mask is set to that value. The type descriptors 3520 /// used in Str are specific to PPC MMA builtins and are documented in the file 3521 /// defining the PPC builtins. 3522 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3523 unsigned &Mask) { 3524 bool RequireICE = false; 3525 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3526 switch (*Str++) { 3527 case 'V': 3528 return Context.getVectorType(Context.UnsignedCharTy, 16, 3529 VectorType::VectorKind::AltiVecVector); 3530 case 'i': { 3531 char *End; 3532 unsigned size = strtoul(Str, &End, 10); 3533 assert(End != Str && "Missing constant parameter constraint"); 3534 Str = End; 3535 Mask = size; 3536 return Context.IntTy; 3537 } 3538 case 'W': { 3539 char *End; 3540 unsigned size = strtoul(Str, &End, 10); 3541 assert(End != Str && "Missing PowerPC MMA type size"); 3542 Str = End; 3543 QualType Type; 3544 switch (size) { 3545 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3546 case size: Type = Context.Id##Ty; break; 3547 #include "clang/Basic/PPCTypes.def" 3548 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3549 } 3550 bool CheckVectorArgs = false; 3551 while (!CheckVectorArgs) { 3552 switch (*Str++) { 3553 case '*': 3554 Type = Context.getPointerType(Type); 3555 break; 3556 case 'C': 3557 Type = Type.withConst(); 3558 break; 3559 default: 3560 CheckVectorArgs = true; 3561 --Str; 3562 break; 3563 } 3564 } 3565 return Type; 3566 } 3567 default: 3568 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3569 } 3570 } 3571 3572 static bool isPPC_64Builtin(unsigned BuiltinID) { 3573 // These builtins only work on PPC 64bit targets. 
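// Illustrative example (hypothetical user code): on a 32-bit powerpc target a
// call such as
//   long long r = __builtin_bpermd(Mask, Src);
// matches this list, and the caller (CheckPPCBuiltinFunctionCall) rejects it
// with err_64_bit_builtin_32_bit_tgt; on powerpc64/powerpc64le it is accepted.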
3574 switch (BuiltinID) { 3575 case PPC::BI__builtin_divde: 3576 case PPC::BI__builtin_divdeu: 3577 case PPC::BI__builtin_bpermd: 3578 case PPC::BI__builtin_ppc_ldarx: 3579 case PPC::BI__builtin_ppc_stdcx: 3580 case PPC::BI__builtin_ppc_tdw: 3581 case PPC::BI__builtin_ppc_trapd: 3582 case PPC::BI__builtin_ppc_cmpeqb: 3583 case PPC::BI__builtin_ppc_setb: 3584 case PPC::BI__builtin_ppc_mulhd: 3585 case PPC::BI__builtin_ppc_mulhdu: 3586 case PPC::BI__builtin_ppc_maddhd: 3587 case PPC::BI__builtin_ppc_maddhdu: 3588 case PPC::BI__builtin_ppc_maddld: 3589 case PPC::BI__builtin_ppc_load8r: 3590 case PPC::BI__builtin_ppc_store8r: 3591 case PPC::BI__builtin_ppc_insert_exp: 3592 case PPC::BI__builtin_ppc_extract_sig: 3593 case PPC::BI__builtin_ppc_addex: 3594 case PPC::BI__builtin_darn: 3595 case PPC::BI__builtin_darn_raw: 3596 case PPC::BI__builtin_ppc_compare_and_swaplp: 3597 case PPC::BI__builtin_ppc_fetch_and_addlp: 3598 case PPC::BI__builtin_ppc_fetch_and_andlp: 3599 case PPC::BI__builtin_ppc_fetch_and_orlp: 3600 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3601 return true; 3602 } 3603 return false; 3604 } 3605 3606 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3607 StringRef FeatureToCheck, unsigned DiagID, 3608 StringRef DiagArg = "") { 3609 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3610 return false; 3611 3612 if (DiagArg.empty()) 3613 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3614 else 3615 S.Diag(TheCall->getBeginLoc(), DiagID) 3616 << DiagArg << TheCall->getSourceRange(); 3617 3618 return true; 3619 } 3620 3621 /// Returns true if the argument consists of one contiguous run of 1s with any 3622 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3623 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3624 /// since all 1s are not contiguous. 3625 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3626 llvm::APSInt Result; 3627 // We can't check the value of a dependent argument. 3628 Expr *Arg = TheCall->getArg(ArgNum); 3629 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3630 return false; 3631 3632 // Check constant-ness first. 3633 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3634 return true; 3635 3636 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 
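// Illustrative 32-bit values (a sketch, not an exhaustive list): 0x000FF000 and
// 0x0000FFFF are plain shifted masks; 0xFF0000FF is also accepted because its
// complement 0x00FFFF00 is a shifted mask (the run of 1s wraps around);
// 0x0F0F0000 is rejected since neither it nor its complement is one contiguous
// run.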
3637 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3638 return false; 3639 3640 return Diag(TheCall->getBeginLoc(), 3641 diag::err_argument_not_contiguous_bit_field) 3642 << ArgNum << Arg->getSourceRange(); 3643 } 3644 3645 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3646 CallExpr *TheCall) { 3647 unsigned i = 0, l = 0, u = 0; 3648 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3649 llvm::APSInt Result; 3650 3651 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3652 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3653 << TheCall->getSourceRange(); 3654 3655 switch (BuiltinID) { 3656 default: return false; 3657 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3658 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3659 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3660 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3661 case PPC::BI__builtin_altivec_dss: 3662 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3663 case PPC::BI__builtin_tbegin: 3664 case PPC::BI__builtin_tend: 3665 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 3666 SemaFeatureCheck(*this, TheCall, "htm", 3667 diag::err_ppc_builtin_requires_htm); 3668 case PPC::BI__builtin_tsr: 3669 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3670 SemaFeatureCheck(*this, TheCall, "htm", 3671 diag::err_ppc_builtin_requires_htm); 3672 case PPC::BI__builtin_tabortwc: 3673 case PPC::BI__builtin_tabortdc: 3674 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3675 SemaFeatureCheck(*this, TheCall, "htm", 3676 diag::err_ppc_builtin_requires_htm); 3677 case PPC::BI__builtin_tabortwci: 3678 case PPC::BI__builtin_tabortdci: 3679 return SemaFeatureCheck(*this, TheCall, "htm", 3680 diag::err_ppc_builtin_requires_htm) || 3681 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3682 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 3683 case PPC::BI__builtin_tabort: 3684 case PPC::BI__builtin_tcheck: 3685 case PPC::BI__builtin_treclaim: 3686 case PPC::BI__builtin_trechkpt: 3687 case PPC::BI__builtin_tendall: 3688 case PPC::BI__builtin_tresume: 3689 case PPC::BI__builtin_tsuspend: 3690 case PPC::BI__builtin_get_texasr: 3691 case PPC::BI__builtin_get_texasru: 3692 case PPC::BI__builtin_get_tfhar: 3693 case PPC::BI__builtin_get_tfiar: 3694 case PPC::BI__builtin_set_texasr: 3695 case PPC::BI__builtin_set_texasru: 3696 case PPC::BI__builtin_set_tfhar: 3697 case PPC::BI__builtin_set_tfiar: 3698 case PPC::BI__builtin_ttest: 3699 return SemaFeatureCheck(*this, TheCall, "htm", 3700 diag::err_ppc_builtin_requires_htm); 3701 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 3702 // __builtin_(un)pack_longdouble are available only if long double uses IBM 3703 // extended double representation. 
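// Illustrative scenario (the driver flag spelling is an assumption): when long
// double is IEEE binary128 rather than IBM double-double (e.g. with
// -mabi=ieeelongdouble), the format check below fires and reports
// err_ppc_builtin_requires_abi suggesting "ibmlongdouble".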
3704 case PPC::BI__builtin_unpack_longdouble: 3705 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 3706 return true; 3707 LLVM_FALLTHROUGH; 3708 case PPC::BI__builtin_pack_longdouble: 3709 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 3710 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 3711 << "ibmlongdouble"; 3712 return false; 3713 case PPC::BI__builtin_altivec_dst: 3714 case PPC::BI__builtin_altivec_dstt: 3715 case PPC::BI__builtin_altivec_dstst: 3716 case PPC::BI__builtin_altivec_dststt: 3717 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3718 case PPC::BI__builtin_vsx_xxpermdi: 3719 case PPC::BI__builtin_vsx_xxsldwi: 3720 return SemaBuiltinVSX(TheCall); 3721 case PPC::BI__builtin_divwe: 3722 case PPC::BI__builtin_divweu: 3723 case PPC::BI__builtin_divde: 3724 case PPC::BI__builtin_divdeu: 3725 return SemaFeatureCheck(*this, TheCall, "extdiv", 3726 diag::err_ppc_builtin_only_on_arch, "7"); 3727 case PPC::BI__builtin_bpermd: 3728 return SemaFeatureCheck(*this, TheCall, "bpermd", 3729 diag::err_ppc_builtin_only_on_arch, "7"); 3730 case PPC::BI__builtin_unpack_vector_int128: 3731 return SemaFeatureCheck(*this, TheCall, "vsx", 3732 diag::err_ppc_builtin_only_on_arch, "7") || 3733 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3734 case PPC::BI__builtin_pack_vector_int128: 3735 return SemaFeatureCheck(*this, TheCall, "vsx", 3736 diag::err_ppc_builtin_only_on_arch, "7"); 3737 case PPC::BI__builtin_altivec_vgnb: 3738 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3739 case PPC::BI__builtin_altivec_vec_replace_elt: 3740 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3741 QualType VecTy = TheCall->getArg(0)->getType(); 3742 QualType EltTy = TheCall->getArg(1)->getType(); 3743 unsigned Width = Context.getIntWidth(EltTy); 3744 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 3745 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3746 } 3747 case PPC::BI__builtin_vsx_xxeval: 3748 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3749 case PPC::BI__builtin_altivec_vsldbi: 3750 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3751 case PPC::BI__builtin_altivec_vsrdbi: 3752 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3753 case PPC::BI__builtin_vsx_xxpermx: 3754 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3755 case PPC::BI__builtin_ppc_tw: 3756 case PPC::BI__builtin_ppc_tdw: 3757 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 3758 case PPC::BI__builtin_ppc_cmpeqb: 3759 case PPC::BI__builtin_ppc_setb: 3760 case PPC::BI__builtin_ppc_maddhd: 3761 case PPC::BI__builtin_ppc_maddhdu: 3762 case PPC::BI__builtin_ppc_maddld: 3763 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3764 diag::err_ppc_builtin_only_on_arch, "9"); 3765 case PPC::BI__builtin_ppc_cmprb: 3766 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3767 diag::err_ppc_builtin_only_on_arch, "9") || 3768 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 3769 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 3770 // be a constant that represents a contiguous bit field. 
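// Illustrative mask values (hypothetical user code): a mask argument of
// 0x00FFFF00 or 0xFF0000FF satisfies SemaValueIsRunOfOnes, while something
// like 0x00FF00FF is diagnosed with err_argument_not_contiguous_bit_field.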
3771 case PPC::BI__builtin_ppc_rlwnm: 3772 return SemaValueIsRunOfOnes(TheCall, 2); 3773 case PPC::BI__builtin_ppc_rlwimi: 3774 case PPC::BI__builtin_ppc_rldimi: 3775 return SemaBuiltinConstantArg(TheCall, 2, Result) || 3776 SemaValueIsRunOfOnes(TheCall, 3); 3777 case PPC::BI__builtin_ppc_extract_exp: 3778 case PPC::BI__builtin_ppc_extract_sig: 3779 case PPC::BI__builtin_ppc_insert_exp: 3780 return SemaFeatureCheck(*this, TheCall, "power9-vector", 3781 diag::err_ppc_builtin_only_on_arch, "9"); 3782 case PPC::BI__builtin_ppc_addex: { 3783 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3784 diag::err_ppc_builtin_only_on_arch, "9") || 3785 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 3786 return true; 3787 // Output warning for reserved values 1 to 3. 3788 int ArgValue = 3789 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 3790 if (ArgValue != 0) 3791 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 3792 << ArgValue; 3793 return false; 3794 } 3795 case PPC::BI__builtin_ppc_mtfsb0: 3796 case PPC::BI__builtin_ppc_mtfsb1: 3797 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3798 case PPC::BI__builtin_ppc_mtfsf: 3799 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 3800 case PPC::BI__builtin_ppc_mtfsfi: 3801 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3802 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3803 case PPC::BI__builtin_ppc_alignx: 3804 return SemaBuiltinConstantArgPower2(TheCall, 0); 3805 case PPC::BI__builtin_ppc_rdlam: 3806 return SemaValueIsRunOfOnes(TheCall, 2); 3807 case PPC::BI__builtin_ppc_icbt: 3808 case PPC::BI__builtin_ppc_sthcx: 3809 case PPC::BI__builtin_ppc_stbcx: 3810 case PPC::BI__builtin_ppc_lharx: 3811 case PPC::BI__builtin_ppc_lbarx: 3812 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3813 diag::err_ppc_builtin_only_on_arch, "8"); 3814 case PPC::BI__builtin_vsx_ldrmb: 3815 case PPC::BI__builtin_vsx_strmb: 3816 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3817 diag::err_ppc_builtin_only_on_arch, "8") || 3818 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3819 case PPC::BI__builtin_altivec_vcntmbb: 3820 case PPC::BI__builtin_altivec_vcntmbh: 3821 case PPC::BI__builtin_altivec_vcntmbw: 3822 case PPC::BI__builtin_altivec_vcntmbd: 3823 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3824 case PPC::BI__builtin_darn: 3825 case PPC::BI__builtin_darn_raw: 3826 case PPC::BI__builtin_darn_32: 3827 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3828 diag::err_ppc_builtin_only_on_arch, "9"); 3829 case PPC::BI__builtin_vsx_xxgenpcvbm: 3830 case PPC::BI__builtin_vsx_xxgenpcvhm: 3831 case PPC::BI__builtin_vsx_xxgenpcvwm: 3832 case PPC::BI__builtin_vsx_xxgenpcvdm: 3833 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3834 case PPC::BI__builtin_ppc_compare_exp_uo: 3835 case PPC::BI__builtin_ppc_compare_exp_lt: 3836 case PPC::BI__builtin_ppc_compare_exp_gt: 3837 case PPC::BI__builtin_ppc_compare_exp_eq: 3838 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3839 diag::err_ppc_builtin_only_on_arch, "9") || 3840 SemaFeatureCheck(*this, TheCall, "vsx", 3841 diag::err_ppc_builtin_requires_vsx); 3842 case PPC::BI__builtin_ppc_test_data_class: { 3843 // Check if the first argument of the __builtin_ppc_test_data_class call is 3844 // valid. The argument must be either a 'float' or a 'double'. 
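// Illustrative calls (hypothetical user code):
//   __builtin_ppc_test_data_class(1.0, 64)    // OK: 'double', constant in 0..127
//   __builtin_ppc_test_data_class(1.0f, 127)  // OK: 'float'
// Passing a 'long double' (or any other type) as the first argument triggers
// err_ppc_invalid_test_data_class_type below.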
3845 QualType ArgType = TheCall->getArg(0)->getType(); 3846 if (ArgType != QualType(Context.FloatTy) && 3847 ArgType != QualType(Context.DoubleTy)) 3848 return Diag(TheCall->getBeginLoc(), 3849 diag::err_ppc_invalid_test_data_class_type); 3850 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3851 diag::err_ppc_builtin_only_on_arch, "9") || 3852 SemaFeatureCheck(*this, TheCall, "vsx", 3853 diag::err_ppc_builtin_requires_vsx) || 3854 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 3855 } 3856 case PPC::BI__builtin_ppc_load8r: 3857 case PPC::BI__builtin_ppc_store8r: 3858 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 3859 diag::err_ppc_builtin_only_on_arch, "7"); 3860 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 3861 case PPC::BI__builtin_##Name: \ 3862 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 3863 #include "clang/Basic/BuiltinsPPC.def" 3864 } 3865 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3866 } 3867 3868 // Check if the given type is a non-pointer PPC MMA type. This function is used 3869 // in Sema to prevent invalid uses of restricted PPC MMA types. 3870 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 3871 if (Type->isPointerType() || Type->isArrayType()) 3872 return false; 3873 3874 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 3875 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 3876 if (false 3877 #include "clang/Basic/PPCTypes.def" 3878 ) { 3879 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 3880 return true; 3881 } 3882 return false; 3883 } 3884 3885 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 3886 CallExpr *TheCall) { 3887 // Position of the memory order and scope arguments in the builtin. 3888 unsigned OrderIndex, ScopeIndex; 3889 switch (BuiltinID) { 3890 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 3891 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 3892 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 3893 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 3894 OrderIndex = 2; 3895 ScopeIndex = 3; 3896 break; 3897 case AMDGPU::BI__builtin_amdgcn_fence: 3898 OrderIndex = 0; 3899 ScopeIndex = 1; 3900 break; 3901 default: 3902 return false; 3903 } 3904 3905 ExprResult Arg = TheCall->getArg(OrderIndex); 3906 auto ArgExpr = Arg.get(); 3907 Expr::EvalResult ArgResult; 3908 3909 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 3910 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 3911 << ArgExpr->getType(); 3912 auto Ord = ArgResult.Val.getInt().getZExtValue(); 3913 3914 // Check validity of memory ordering as per C11 / C++11's memory model. 3915 // Only fence needs check. Atomic dec/inc allow all memory orders.
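// Illustrative calls (hypothetical user code; the scope string is just an
// example): __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup") is accepted,
// while __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup") is rejected by
// the switch below, since relaxed/consume are only permitted for the atomic
// inc/dec builtins.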
3916 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3917 return Diag(ArgExpr->getBeginLoc(), 3918 diag::warn_atomic_op_has_invalid_memory_order) 3919 << ArgExpr->getSourceRange(); 3920 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3921 case llvm::AtomicOrderingCABI::relaxed: 3922 case llvm::AtomicOrderingCABI::consume: 3923 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3924 return Diag(ArgExpr->getBeginLoc(), 3925 diag::warn_atomic_op_has_invalid_memory_order) 3926 << ArgExpr->getSourceRange(); 3927 break; 3928 case llvm::AtomicOrderingCABI::acquire: 3929 case llvm::AtomicOrderingCABI::release: 3930 case llvm::AtomicOrderingCABI::acq_rel: 3931 case llvm::AtomicOrderingCABI::seq_cst: 3932 break; 3933 } 3934 3935 Arg = TheCall->getArg(ScopeIndex); 3936 ArgExpr = Arg.get(); 3937 Expr::EvalResult ArgResult1; 3938 // Check that sync scope is a constant literal 3939 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3940 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3941 << ArgExpr->getType(); 3942 3943 return false; 3944 } 3945 3946 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3947 llvm::APSInt Result; 3948 3949 // We can't check the value of a dependent argument. 3950 Expr *Arg = TheCall->getArg(ArgNum); 3951 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3952 return false; 3953 3954 // Check constant-ness first. 3955 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3956 return true; 3957 3958 int64_t Val = Result.getSExtValue(); 3959 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3960 return false; 3961 3962 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3963 << Arg->getSourceRange(); 3964 } 3965 3966 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3967 unsigned BuiltinID, 3968 CallExpr *TheCall) { 3969 // CodeGenFunction can also detect this, but this gives a better error 3970 // message. 3971 bool FeatureMissing = false; 3972 SmallVector<StringRef> ReqFeatures; 3973 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3974 Features.split(ReqFeatures, ','); 3975 3976 // Check if each required feature is included 3977 for (StringRef F : ReqFeatures) { 3978 if (TI.hasFeature(F)) 3979 continue; 3980 3981 // If the feature is 64bit, alter the string so it will print better in 3982 // the diagnostic. 3983 if (F == "64bit") 3984 F = "RV64"; 3985 3986 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 
3987 F.consume_front("experimental-"); 3988 std::string FeatureStr = F.str(); 3989 FeatureStr[0] = std::toupper(FeatureStr[0]); 3990 3991 // Error message 3992 FeatureMissing = true; 3993 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 3994 << TheCall->getSourceRange() << StringRef(FeatureStr); 3995 } 3996 3997 if (FeatureMissing) 3998 return true; 3999 4000 switch (BuiltinID) { 4001 case RISCVVector::BI__builtin_rvv_vsetvli: 4002 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4003 CheckRISCVLMUL(TheCall, 2); 4004 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4005 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4006 CheckRISCVLMUL(TheCall, 1); 4007 } 4008 4009 return false; 4010 } 4011 4012 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4013 CallExpr *TheCall) { 4014 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4015 Expr *Arg = TheCall->getArg(0); 4016 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4017 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4018 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4019 << Arg->getSourceRange(); 4020 } 4021 4022 // For intrinsics which take an immediate value as part of the instruction, 4023 // range check them here. 4024 unsigned i = 0, l = 0, u = 0; 4025 switch (BuiltinID) { 4026 default: return false; 4027 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4028 case SystemZ::BI__builtin_s390_verimb: 4029 case SystemZ::BI__builtin_s390_verimh: 4030 case SystemZ::BI__builtin_s390_verimf: 4031 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4032 case SystemZ::BI__builtin_s390_vfaeb: 4033 case SystemZ::BI__builtin_s390_vfaeh: 4034 case SystemZ::BI__builtin_s390_vfaef: 4035 case SystemZ::BI__builtin_s390_vfaebs: 4036 case SystemZ::BI__builtin_s390_vfaehs: 4037 case SystemZ::BI__builtin_s390_vfaefs: 4038 case SystemZ::BI__builtin_s390_vfaezb: 4039 case SystemZ::BI__builtin_s390_vfaezh: 4040 case SystemZ::BI__builtin_s390_vfaezf: 4041 case SystemZ::BI__builtin_s390_vfaezbs: 4042 case SystemZ::BI__builtin_s390_vfaezhs: 4043 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4044 case SystemZ::BI__builtin_s390_vfisb: 4045 case SystemZ::BI__builtin_s390_vfidb: 4046 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4047 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4048 case SystemZ::BI__builtin_s390_vftcisb: 4049 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4050 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4051 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4052 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4053 case SystemZ::BI__builtin_s390_vstrcb: 4054 case SystemZ::BI__builtin_s390_vstrch: 4055 case SystemZ::BI__builtin_s390_vstrcf: 4056 case SystemZ::BI__builtin_s390_vstrczb: 4057 case SystemZ::BI__builtin_s390_vstrczh: 4058 case SystemZ::BI__builtin_s390_vstrczf: 4059 case SystemZ::BI__builtin_s390_vstrcbs: 4060 case SystemZ::BI__builtin_s390_vstrchs: 4061 case SystemZ::BI__builtin_s390_vstrcfs: 4062 case SystemZ::BI__builtin_s390_vstrczbs: 4063 case SystemZ::BI__builtin_s390_vstrczhs: 4064 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4065 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4066 case SystemZ::BI__builtin_s390_vfminsb: 4067 case SystemZ::BI__builtin_s390_vfmaxsb: 4068 case SystemZ::BI__builtin_s390_vfmindb: 4069 case 
SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4070 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4071 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4072 case SystemZ::BI__builtin_s390_vclfnhs: 4073 case SystemZ::BI__builtin_s390_vclfnls: 4074 case SystemZ::BI__builtin_s390_vcfn: 4075 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4076 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4077 } 4078 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4079 } 4080 4081 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4082 /// This checks that the target supports __builtin_cpu_supports and 4083 /// that the string argument is constant and valid. 4084 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4085 CallExpr *TheCall) { 4086 Expr *Arg = TheCall->getArg(0); 4087 4088 // Check if the argument is a string literal. 4089 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4090 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4091 << Arg->getSourceRange(); 4092 4093 // Check the contents of the string. 4094 StringRef Feature = 4095 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4096 if (!TI.validateCpuSupports(Feature)) 4097 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4098 << Arg->getSourceRange(); 4099 return false; 4100 } 4101 4102 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4103 /// This checks that the target supports __builtin_cpu_is and 4104 /// that the string argument is constant and valid. 4105 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4106 Expr *Arg = TheCall->getArg(0); 4107 4108 // Check if the argument is a string literal. 4109 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4110 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4111 << Arg->getSourceRange(); 4112 4113 // Check the contents of the string. 4114 StringRef Feature = 4115 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4116 if (!TI.validateCpuIs(Feature)) 4117 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4118 << Arg->getSourceRange(); 4119 return false; 4120 } 4121 4122 // Check if the rounding mode is legal. 4123 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4124 // Indicates if this instruction has rounding control or just SAE. 
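// Illustrative immediates (the _MM_FROUND_* values are quoted from the x86
// intrinsic headers and are an assumption for this example):
// _MM_FROUND_CUR_DIRECTION (4) and _MM_FROUND_NO_EXC (8) are always legal,
// and intrinsics with rounding control additionally accept a direction in
// bits 1:0 combined with _MM_FROUND_NO_EXC, e.g.
// _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC (11).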
4125 bool HasRC = false; 4126 4127 unsigned ArgNum = 0; 4128 switch (BuiltinID) { 4129 default: 4130 return false; 4131 case X86::BI__builtin_ia32_vcvttsd2si32: 4132 case X86::BI__builtin_ia32_vcvttsd2si64: 4133 case X86::BI__builtin_ia32_vcvttsd2usi32: 4134 case X86::BI__builtin_ia32_vcvttsd2usi64: 4135 case X86::BI__builtin_ia32_vcvttss2si32: 4136 case X86::BI__builtin_ia32_vcvttss2si64: 4137 case X86::BI__builtin_ia32_vcvttss2usi32: 4138 case X86::BI__builtin_ia32_vcvttss2usi64: 4139 case X86::BI__builtin_ia32_vcvttsh2si32: 4140 case X86::BI__builtin_ia32_vcvttsh2si64: 4141 case X86::BI__builtin_ia32_vcvttsh2usi32: 4142 case X86::BI__builtin_ia32_vcvttsh2usi64: 4143 ArgNum = 1; 4144 break; 4145 case X86::BI__builtin_ia32_maxpd512: 4146 case X86::BI__builtin_ia32_maxps512: 4147 case X86::BI__builtin_ia32_minpd512: 4148 case X86::BI__builtin_ia32_minps512: 4149 case X86::BI__builtin_ia32_maxph512: 4150 case X86::BI__builtin_ia32_minph512: 4151 ArgNum = 2; 4152 break; 4153 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4154 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4155 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4156 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4157 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4158 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4159 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4160 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4161 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4162 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4163 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4164 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4165 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4166 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4167 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4168 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4169 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4170 case X86::BI__builtin_ia32_exp2pd_mask: 4171 case X86::BI__builtin_ia32_exp2ps_mask: 4172 case X86::BI__builtin_ia32_getexppd512_mask: 4173 case X86::BI__builtin_ia32_getexpps512_mask: 4174 case X86::BI__builtin_ia32_getexpph512_mask: 4175 case X86::BI__builtin_ia32_rcp28pd_mask: 4176 case X86::BI__builtin_ia32_rcp28ps_mask: 4177 case X86::BI__builtin_ia32_rsqrt28pd_mask: 4178 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4179 case X86::BI__builtin_ia32_vcomisd: 4180 case X86::BI__builtin_ia32_vcomiss: 4181 case X86::BI__builtin_ia32_vcomish: 4182 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4183 ArgNum = 3; 4184 break; 4185 case X86::BI__builtin_ia32_cmppd512_mask: 4186 case X86::BI__builtin_ia32_cmpps512_mask: 4187 case X86::BI__builtin_ia32_cmpsd_mask: 4188 case X86::BI__builtin_ia32_cmpss_mask: 4189 case X86::BI__builtin_ia32_cmpsh_mask: 4190 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4191 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4192 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4193 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4194 case X86::BI__builtin_ia32_getexpss128_round_mask: 4195 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4196 case X86::BI__builtin_ia32_getmantpd512_mask: 4197 case X86::BI__builtin_ia32_getmantps512_mask: 4198 case X86::BI__builtin_ia32_getmantph512_mask: 4199 case X86::BI__builtin_ia32_maxsd_round_mask: 4200 case X86::BI__builtin_ia32_maxss_round_mask: 4201 case X86::BI__builtin_ia32_maxsh_round_mask: 4202 case X86::BI__builtin_ia32_minsd_round_mask: 4203 case X86::BI__builtin_ia32_minss_round_mask: 4204 case X86::BI__builtin_ia32_minsh_round_mask: 4205 case X86::BI__builtin_ia32_rcp28sd_round_mask: 
4206 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4207 case X86::BI__builtin_ia32_reducepd512_mask: 4208 case X86::BI__builtin_ia32_reduceps512_mask: 4209 case X86::BI__builtin_ia32_reduceph512_mask: 4210 case X86::BI__builtin_ia32_rndscalepd_mask: 4211 case X86::BI__builtin_ia32_rndscaleps_mask: 4212 case X86::BI__builtin_ia32_rndscaleph_mask: 4213 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4214 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4215 ArgNum = 4; 4216 break; 4217 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4218 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4219 case X86::BI__builtin_ia32_fixupimmps512_mask: 4220 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4221 case X86::BI__builtin_ia32_fixupimmsd_mask: 4222 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4223 case X86::BI__builtin_ia32_fixupimmss_mask: 4224 case X86::BI__builtin_ia32_fixupimmss_maskz: 4225 case X86::BI__builtin_ia32_getmantsd_round_mask: 4226 case X86::BI__builtin_ia32_getmantss_round_mask: 4227 case X86::BI__builtin_ia32_getmantsh_round_mask: 4228 case X86::BI__builtin_ia32_rangepd512_mask: 4229 case X86::BI__builtin_ia32_rangeps512_mask: 4230 case X86::BI__builtin_ia32_rangesd128_round_mask: 4231 case X86::BI__builtin_ia32_rangess128_round_mask: 4232 case X86::BI__builtin_ia32_reducesd_mask: 4233 case X86::BI__builtin_ia32_reducess_mask: 4234 case X86::BI__builtin_ia32_reducesh_mask: 4235 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4236 case X86::BI__builtin_ia32_rndscaless_round_mask: 4237 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4238 ArgNum = 5; 4239 break; 4240 case X86::BI__builtin_ia32_vcvtsd2si64: 4241 case X86::BI__builtin_ia32_vcvtsd2si32: 4242 case X86::BI__builtin_ia32_vcvtsd2usi32: 4243 case X86::BI__builtin_ia32_vcvtsd2usi64: 4244 case X86::BI__builtin_ia32_vcvtss2si32: 4245 case X86::BI__builtin_ia32_vcvtss2si64: 4246 case X86::BI__builtin_ia32_vcvtss2usi32: 4247 case X86::BI__builtin_ia32_vcvtss2usi64: 4248 case X86::BI__builtin_ia32_vcvtsh2si32: 4249 case X86::BI__builtin_ia32_vcvtsh2si64: 4250 case X86::BI__builtin_ia32_vcvtsh2usi32: 4251 case X86::BI__builtin_ia32_vcvtsh2usi64: 4252 case X86::BI__builtin_ia32_sqrtpd512: 4253 case X86::BI__builtin_ia32_sqrtps512: 4254 case X86::BI__builtin_ia32_sqrtph512: 4255 ArgNum = 1; 4256 HasRC = true; 4257 break; 4258 case X86::BI__builtin_ia32_addph512: 4259 case X86::BI__builtin_ia32_divph512: 4260 case X86::BI__builtin_ia32_mulph512: 4261 case X86::BI__builtin_ia32_subph512: 4262 case X86::BI__builtin_ia32_addpd512: 4263 case X86::BI__builtin_ia32_addps512: 4264 case X86::BI__builtin_ia32_divpd512: 4265 case X86::BI__builtin_ia32_divps512: 4266 case X86::BI__builtin_ia32_mulpd512: 4267 case X86::BI__builtin_ia32_mulps512: 4268 case X86::BI__builtin_ia32_subpd512: 4269 case X86::BI__builtin_ia32_subps512: 4270 case X86::BI__builtin_ia32_cvtsi2sd64: 4271 case X86::BI__builtin_ia32_cvtsi2ss32: 4272 case X86::BI__builtin_ia32_cvtsi2ss64: 4273 case X86::BI__builtin_ia32_cvtusi2sd64: 4274 case X86::BI__builtin_ia32_cvtusi2ss32: 4275 case X86::BI__builtin_ia32_cvtusi2ss64: 4276 case X86::BI__builtin_ia32_vcvtusi2sh: 4277 case X86::BI__builtin_ia32_vcvtusi642sh: 4278 case X86::BI__builtin_ia32_vcvtsi2sh: 4279 case X86::BI__builtin_ia32_vcvtsi642sh: 4280 ArgNum = 2; 4281 HasRC = true; 4282 break; 4283 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4284 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4285 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4286 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4287 case 
X86::BI__builtin_ia32_cvtpd2ps512_mask: 4288 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4289 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4290 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4291 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4292 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4293 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4294 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4295 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4296 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4297 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4298 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4299 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4300 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4301 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4302 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4303 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4304 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4305 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4306 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4307 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4308 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4309 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4310 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4311 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4312 ArgNum = 3; 4313 HasRC = true; 4314 break; 4315 case X86::BI__builtin_ia32_addsh_round_mask: 4316 case X86::BI__builtin_ia32_addss_round_mask: 4317 case X86::BI__builtin_ia32_addsd_round_mask: 4318 case X86::BI__builtin_ia32_divsh_round_mask: 4319 case X86::BI__builtin_ia32_divss_round_mask: 4320 case X86::BI__builtin_ia32_divsd_round_mask: 4321 case X86::BI__builtin_ia32_mulsh_round_mask: 4322 case X86::BI__builtin_ia32_mulss_round_mask: 4323 case X86::BI__builtin_ia32_mulsd_round_mask: 4324 case X86::BI__builtin_ia32_subsh_round_mask: 4325 case X86::BI__builtin_ia32_subss_round_mask: 4326 case X86::BI__builtin_ia32_subsd_round_mask: 4327 case X86::BI__builtin_ia32_scalefph512_mask: 4328 case X86::BI__builtin_ia32_scalefpd512_mask: 4329 case X86::BI__builtin_ia32_scalefps512_mask: 4330 case X86::BI__builtin_ia32_scalefsd_round_mask: 4331 case X86::BI__builtin_ia32_scalefss_round_mask: 4332 case X86::BI__builtin_ia32_scalefsh_round_mask: 4333 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4334 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4335 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4336 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4337 case X86::BI__builtin_ia32_sqrtss_round_mask: 4338 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4339 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4340 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4341 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4342 case X86::BI__builtin_ia32_vfmaddss3_mask: 4343 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4344 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4345 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4346 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4347 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4348 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4349 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4350 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4351 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4352 case X86::BI__builtin_ia32_vfmaddps512_mask: 4353 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4354 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4355 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4356 case X86::BI__builtin_ia32_vfmaddph512_mask: 4357 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4358 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4359 case 
X86::BI__builtin_ia32_vfmsubph512_mask3: 4360 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4361 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4362 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4363 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4364 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4365 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4366 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4367 case X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4368 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4369 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 4370 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 4371 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 4372 case X86::BI__builtin_ia32_vfmaddcsh_mask: 4373 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 4374 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 4375 case X86::BI__builtin_ia32_vfmaddcph512_mask: 4376 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 4377 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 4378 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 4379 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 4380 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 4381 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 4382 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 4383 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 4384 case X86::BI__builtin_ia32_vfmulcsh_mask: 4385 case X86::BI__builtin_ia32_vfmulcph512_mask: 4386 case X86::BI__builtin_ia32_vfcmulcsh_mask: 4387 case X86::BI__builtin_ia32_vfcmulcph512_mask: 4388 ArgNum = 4; 4389 HasRC = true; 4390 break; 4391 } 4392 4393 llvm::APSInt Result; 4394 4395 // We can't check the value of a dependent argument. 4396 Expr *Arg = TheCall->getArg(ArgNum); 4397 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4398 return false; 4399 4400 // Check constant-ness first. 4401 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4402 return true; 4403 4404 // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the 4405 // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0), make 4406 // sure it is only combined with ROUND_NO_EXC. If the intrinsic does not have 4407 // rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 4408 if (Result == 4/*ROUND_CUR_DIRECTION*/ || 4409 Result == 8/*ROUND_NO_EXC*/ || 4410 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || 4411 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) 4412 return false; 4413 4414 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding) 4415 << Arg->getSourceRange(); 4416 } 4417 4418 // Check if the gather/scatter scale is legal.
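// Illustrative usage (hypothetical user code; the accepted values reflect the
// hardware's index scaling of 1, 2, 4 or 8 bytes), e.g.
//   __m512 v = _mm512_i32gather_ps(Index, Base, 4);
// passes, while a scale such as 3 would be diagnosed.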
4419 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4420 CallExpr *TheCall) { 4421 unsigned ArgNum = 0; 4422 switch (BuiltinID) { 4423 default: 4424 return false; 4425 case X86::BI__builtin_ia32_gatherpfdpd: 4426 case X86::BI__builtin_ia32_gatherpfdps: 4427 case X86::BI__builtin_ia32_gatherpfqpd: 4428 case X86::BI__builtin_ia32_gatherpfqps: 4429 case X86::BI__builtin_ia32_scatterpfdpd: 4430 case X86::BI__builtin_ia32_scatterpfdps: 4431 case X86::BI__builtin_ia32_scatterpfqpd: 4432 case X86::BI__builtin_ia32_scatterpfqps: 4433 ArgNum = 3; 4434 break; 4435 case X86::BI__builtin_ia32_gatherd_pd: 4436 case X86::BI__builtin_ia32_gatherd_pd256: 4437 case X86::BI__builtin_ia32_gatherq_pd: 4438 case X86::BI__builtin_ia32_gatherq_pd256: 4439 case X86::BI__builtin_ia32_gatherd_ps: 4440 case X86::BI__builtin_ia32_gatherd_ps256: 4441 case X86::BI__builtin_ia32_gatherq_ps: 4442 case X86::BI__builtin_ia32_gatherq_ps256: 4443 case X86::BI__builtin_ia32_gatherd_q: 4444 case X86::BI__builtin_ia32_gatherd_q256: 4445 case X86::BI__builtin_ia32_gatherq_q: 4446 case X86::BI__builtin_ia32_gatherq_q256: 4447 case X86::BI__builtin_ia32_gatherd_d: 4448 case X86::BI__builtin_ia32_gatherd_d256: 4449 case X86::BI__builtin_ia32_gatherq_d: 4450 case X86::BI__builtin_ia32_gatherq_d256: 4451 case X86::BI__builtin_ia32_gather3div2df: 4452 case X86::BI__builtin_ia32_gather3div2di: 4453 case X86::BI__builtin_ia32_gather3div4df: 4454 case X86::BI__builtin_ia32_gather3div4di: 4455 case X86::BI__builtin_ia32_gather3div4sf: 4456 case X86::BI__builtin_ia32_gather3div4si: 4457 case X86::BI__builtin_ia32_gather3div8sf: 4458 case X86::BI__builtin_ia32_gather3div8si: 4459 case X86::BI__builtin_ia32_gather3siv2df: 4460 case X86::BI__builtin_ia32_gather3siv2di: 4461 case X86::BI__builtin_ia32_gather3siv4df: 4462 case X86::BI__builtin_ia32_gather3siv4di: 4463 case X86::BI__builtin_ia32_gather3siv4sf: 4464 case X86::BI__builtin_ia32_gather3siv4si: 4465 case X86::BI__builtin_ia32_gather3siv8sf: 4466 case X86::BI__builtin_ia32_gather3siv8si: 4467 case X86::BI__builtin_ia32_gathersiv8df: 4468 case X86::BI__builtin_ia32_gathersiv16sf: 4469 case X86::BI__builtin_ia32_gatherdiv8df: 4470 case X86::BI__builtin_ia32_gatherdiv16sf: 4471 case X86::BI__builtin_ia32_gathersiv8di: 4472 case X86::BI__builtin_ia32_gathersiv16si: 4473 case X86::BI__builtin_ia32_gatherdiv8di: 4474 case X86::BI__builtin_ia32_gatherdiv16si: 4475 case X86::BI__builtin_ia32_scatterdiv2df: 4476 case X86::BI__builtin_ia32_scatterdiv2di: 4477 case X86::BI__builtin_ia32_scatterdiv4df: 4478 case X86::BI__builtin_ia32_scatterdiv4di: 4479 case X86::BI__builtin_ia32_scatterdiv4sf: 4480 case X86::BI__builtin_ia32_scatterdiv4si: 4481 case X86::BI__builtin_ia32_scatterdiv8sf: 4482 case X86::BI__builtin_ia32_scatterdiv8si: 4483 case X86::BI__builtin_ia32_scattersiv2df: 4484 case X86::BI__builtin_ia32_scattersiv2di: 4485 case X86::BI__builtin_ia32_scattersiv4df: 4486 case X86::BI__builtin_ia32_scattersiv4di: 4487 case X86::BI__builtin_ia32_scattersiv4sf: 4488 case X86::BI__builtin_ia32_scattersiv4si: 4489 case X86::BI__builtin_ia32_scattersiv8sf: 4490 case X86::BI__builtin_ia32_scattersiv8si: 4491 case X86::BI__builtin_ia32_scattersiv8df: 4492 case X86::BI__builtin_ia32_scattersiv16sf: 4493 case X86::BI__builtin_ia32_scatterdiv8df: 4494 case X86::BI__builtin_ia32_scatterdiv16sf: 4495 case X86::BI__builtin_ia32_scattersiv8di: 4496 case X86::BI__builtin_ia32_scattersiv16si: 4497 case X86::BI__builtin_ia32_scatterdiv8di: 4498 case X86::BI__builtin_ia32_scatterdiv16si: 4499 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum tile register number is TileRegHigh, so use one bit per
  // register (TileRegHigh + 1 bits in total) to track which registers have
  // already been used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
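  // For example, when targeting x86-64 a call to __builtin_ia32_readeflags_u32
  // is rejected here; the 64-bit variant __builtin_ia32_readeflags_u64 must be
  // used instead.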
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE, make sure it is valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate, make sure it is
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
case X86::BI__builtin_ia32_inserti32x4: 4684 i = 2; l = 0; u = 3; 4685 break; 4686 case X86::BI__builtin_ia32_vpermil2pd: 4687 case X86::BI__builtin_ia32_vpermil2pd256: 4688 case X86::BI__builtin_ia32_vpermil2ps: 4689 case X86::BI__builtin_ia32_vpermil2ps256: 4690 i = 3; l = 0; u = 3; 4691 break; 4692 case X86::BI__builtin_ia32_cmpb128_mask: 4693 case X86::BI__builtin_ia32_cmpw128_mask: 4694 case X86::BI__builtin_ia32_cmpd128_mask: 4695 case X86::BI__builtin_ia32_cmpq128_mask: 4696 case X86::BI__builtin_ia32_cmpb256_mask: 4697 case X86::BI__builtin_ia32_cmpw256_mask: 4698 case X86::BI__builtin_ia32_cmpd256_mask: 4699 case X86::BI__builtin_ia32_cmpq256_mask: 4700 case X86::BI__builtin_ia32_cmpb512_mask: 4701 case X86::BI__builtin_ia32_cmpw512_mask: 4702 case X86::BI__builtin_ia32_cmpd512_mask: 4703 case X86::BI__builtin_ia32_cmpq512_mask: 4704 case X86::BI__builtin_ia32_ucmpb128_mask: 4705 case X86::BI__builtin_ia32_ucmpw128_mask: 4706 case X86::BI__builtin_ia32_ucmpd128_mask: 4707 case X86::BI__builtin_ia32_ucmpq128_mask: 4708 case X86::BI__builtin_ia32_ucmpb256_mask: 4709 case X86::BI__builtin_ia32_ucmpw256_mask: 4710 case X86::BI__builtin_ia32_ucmpd256_mask: 4711 case X86::BI__builtin_ia32_ucmpq256_mask: 4712 case X86::BI__builtin_ia32_ucmpb512_mask: 4713 case X86::BI__builtin_ia32_ucmpw512_mask: 4714 case X86::BI__builtin_ia32_ucmpd512_mask: 4715 case X86::BI__builtin_ia32_ucmpq512_mask: 4716 case X86::BI__builtin_ia32_vpcomub: 4717 case X86::BI__builtin_ia32_vpcomuw: 4718 case X86::BI__builtin_ia32_vpcomud: 4719 case X86::BI__builtin_ia32_vpcomuq: 4720 case X86::BI__builtin_ia32_vpcomb: 4721 case X86::BI__builtin_ia32_vpcomw: 4722 case X86::BI__builtin_ia32_vpcomd: 4723 case X86::BI__builtin_ia32_vpcomq: 4724 case X86::BI__builtin_ia32_vec_set_v8hi: 4725 case X86::BI__builtin_ia32_vec_set_v8si: 4726 i = 2; l = 0; u = 7; 4727 break; 4728 case X86::BI__builtin_ia32_vpermilpd256: 4729 case X86::BI__builtin_ia32_roundps: 4730 case X86::BI__builtin_ia32_roundpd: 4731 case X86::BI__builtin_ia32_roundps256: 4732 case X86::BI__builtin_ia32_roundpd256: 4733 case X86::BI__builtin_ia32_getmantpd128_mask: 4734 case X86::BI__builtin_ia32_getmantpd256_mask: 4735 case X86::BI__builtin_ia32_getmantps128_mask: 4736 case X86::BI__builtin_ia32_getmantps256_mask: 4737 case X86::BI__builtin_ia32_getmantpd512_mask: 4738 case X86::BI__builtin_ia32_getmantps512_mask: 4739 case X86::BI__builtin_ia32_getmantph128_mask: 4740 case X86::BI__builtin_ia32_getmantph256_mask: 4741 case X86::BI__builtin_ia32_getmantph512_mask: 4742 case X86::BI__builtin_ia32_vec_ext_v16qi: 4743 case X86::BI__builtin_ia32_vec_ext_v16hi: 4744 i = 1; l = 0; u = 15; 4745 break; 4746 case X86::BI__builtin_ia32_pblendd128: 4747 case X86::BI__builtin_ia32_blendps: 4748 case X86::BI__builtin_ia32_blendpd256: 4749 case X86::BI__builtin_ia32_shufpd256: 4750 case X86::BI__builtin_ia32_roundss: 4751 case X86::BI__builtin_ia32_roundsd: 4752 case X86::BI__builtin_ia32_rangepd128_mask: 4753 case X86::BI__builtin_ia32_rangepd256_mask: 4754 case X86::BI__builtin_ia32_rangepd512_mask: 4755 case X86::BI__builtin_ia32_rangeps128_mask: 4756 case X86::BI__builtin_ia32_rangeps256_mask: 4757 case X86::BI__builtin_ia32_rangeps512_mask: 4758 case X86::BI__builtin_ia32_getmantsd_round_mask: 4759 case X86::BI__builtin_ia32_getmantss_round_mask: 4760 case X86::BI__builtin_ia32_getmantsh_round_mask: 4761 case X86::BI__builtin_ia32_vec_set_v16qi: 4762 case X86::BI__builtin_ia32_vec_set_v16hi: 4763 i = 2; l = 0; u = 15; 4764 break; 4765 case 
X86::BI__builtin_ia32_vec_ext_v32qi: 4766 i = 1; l = 0; u = 31; 4767 break; 4768 case X86::BI__builtin_ia32_cmpps: 4769 case X86::BI__builtin_ia32_cmpss: 4770 case X86::BI__builtin_ia32_cmppd: 4771 case X86::BI__builtin_ia32_cmpsd: 4772 case X86::BI__builtin_ia32_cmpps256: 4773 case X86::BI__builtin_ia32_cmppd256: 4774 case X86::BI__builtin_ia32_cmpps128_mask: 4775 case X86::BI__builtin_ia32_cmppd128_mask: 4776 case X86::BI__builtin_ia32_cmpps256_mask: 4777 case X86::BI__builtin_ia32_cmppd256_mask: 4778 case X86::BI__builtin_ia32_cmpps512_mask: 4779 case X86::BI__builtin_ia32_cmppd512_mask: 4780 case X86::BI__builtin_ia32_cmpsd_mask: 4781 case X86::BI__builtin_ia32_cmpss_mask: 4782 case X86::BI__builtin_ia32_vec_set_v32qi: 4783 i = 2; l = 0; u = 31; 4784 break; 4785 case X86::BI__builtin_ia32_permdf256: 4786 case X86::BI__builtin_ia32_permdi256: 4787 case X86::BI__builtin_ia32_permdf512: 4788 case X86::BI__builtin_ia32_permdi512: 4789 case X86::BI__builtin_ia32_vpermilps: 4790 case X86::BI__builtin_ia32_vpermilps256: 4791 case X86::BI__builtin_ia32_vpermilpd512: 4792 case X86::BI__builtin_ia32_vpermilps512: 4793 case X86::BI__builtin_ia32_pshufd: 4794 case X86::BI__builtin_ia32_pshufd256: 4795 case X86::BI__builtin_ia32_pshufd512: 4796 case X86::BI__builtin_ia32_pshufhw: 4797 case X86::BI__builtin_ia32_pshufhw256: 4798 case X86::BI__builtin_ia32_pshufhw512: 4799 case X86::BI__builtin_ia32_pshuflw: 4800 case X86::BI__builtin_ia32_pshuflw256: 4801 case X86::BI__builtin_ia32_pshuflw512: 4802 case X86::BI__builtin_ia32_vcvtps2ph: 4803 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4804 case X86::BI__builtin_ia32_vcvtps2ph256: 4805 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4806 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4807 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4808 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4809 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4810 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4811 case X86::BI__builtin_ia32_rndscaleps_mask: 4812 case X86::BI__builtin_ia32_rndscalepd_mask: 4813 case X86::BI__builtin_ia32_rndscaleph_mask: 4814 case X86::BI__builtin_ia32_reducepd128_mask: 4815 case X86::BI__builtin_ia32_reducepd256_mask: 4816 case X86::BI__builtin_ia32_reducepd512_mask: 4817 case X86::BI__builtin_ia32_reduceps128_mask: 4818 case X86::BI__builtin_ia32_reduceps256_mask: 4819 case X86::BI__builtin_ia32_reduceps512_mask: 4820 case X86::BI__builtin_ia32_reduceph128_mask: 4821 case X86::BI__builtin_ia32_reduceph256_mask: 4822 case X86::BI__builtin_ia32_reduceph512_mask: 4823 case X86::BI__builtin_ia32_prold512: 4824 case X86::BI__builtin_ia32_prolq512: 4825 case X86::BI__builtin_ia32_prold128: 4826 case X86::BI__builtin_ia32_prold256: 4827 case X86::BI__builtin_ia32_prolq128: 4828 case X86::BI__builtin_ia32_prolq256: 4829 case X86::BI__builtin_ia32_prord512: 4830 case X86::BI__builtin_ia32_prorq512: 4831 case X86::BI__builtin_ia32_prord128: 4832 case X86::BI__builtin_ia32_prord256: 4833 case X86::BI__builtin_ia32_prorq128: 4834 case X86::BI__builtin_ia32_prorq256: 4835 case X86::BI__builtin_ia32_fpclasspd128_mask: 4836 case X86::BI__builtin_ia32_fpclasspd256_mask: 4837 case X86::BI__builtin_ia32_fpclassps128_mask: 4838 case X86::BI__builtin_ia32_fpclassps256_mask: 4839 case X86::BI__builtin_ia32_fpclassps512_mask: 4840 case X86::BI__builtin_ia32_fpclasspd512_mask: 4841 case X86::BI__builtin_ia32_fpclassph128_mask: 4842 case X86::BI__builtin_ia32_fpclassph256_mask: 4843 case X86::BI__builtin_ia32_fpclassph512_mask: 4844 case 
X86::BI__builtin_ia32_fpclasssd_mask: 4845 case X86::BI__builtin_ia32_fpclassss_mask: 4846 case X86::BI__builtin_ia32_fpclasssh_mask: 4847 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4848 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4849 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4850 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4851 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4852 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4853 case X86::BI__builtin_ia32_kshiftliqi: 4854 case X86::BI__builtin_ia32_kshiftlihi: 4855 case X86::BI__builtin_ia32_kshiftlisi: 4856 case X86::BI__builtin_ia32_kshiftlidi: 4857 case X86::BI__builtin_ia32_kshiftriqi: 4858 case X86::BI__builtin_ia32_kshiftrihi: 4859 case X86::BI__builtin_ia32_kshiftrisi: 4860 case X86::BI__builtin_ia32_kshiftridi: 4861 i = 1; l = 0; u = 255; 4862 break; 4863 case X86::BI__builtin_ia32_vperm2f128_pd256: 4864 case X86::BI__builtin_ia32_vperm2f128_ps256: 4865 case X86::BI__builtin_ia32_vperm2f128_si256: 4866 case X86::BI__builtin_ia32_permti256: 4867 case X86::BI__builtin_ia32_pblendw128: 4868 case X86::BI__builtin_ia32_pblendw256: 4869 case X86::BI__builtin_ia32_blendps256: 4870 case X86::BI__builtin_ia32_pblendd256: 4871 case X86::BI__builtin_ia32_palignr128: 4872 case X86::BI__builtin_ia32_palignr256: 4873 case X86::BI__builtin_ia32_palignr512: 4874 case X86::BI__builtin_ia32_alignq512: 4875 case X86::BI__builtin_ia32_alignd512: 4876 case X86::BI__builtin_ia32_alignd128: 4877 case X86::BI__builtin_ia32_alignd256: 4878 case X86::BI__builtin_ia32_alignq128: 4879 case X86::BI__builtin_ia32_alignq256: 4880 case X86::BI__builtin_ia32_vcomisd: 4881 case X86::BI__builtin_ia32_vcomiss: 4882 case X86::BI__builtin_ia32_shuf_f32x4: 4883 case X86::BI__builtin_ia32_shuf_f64x2: 4884 case X86::BI__builtin_ia32_shuf_i32x4: 4885 case X86::BI__builtin_ia32_shuf_i64x2: 4886 case X86::BI__builtin_ia32_shufpd512: 4887 case X86::BI__builtin_ia32_shufps: 4888 case X86::BI__builtin_ia32_shufps256: 4889 case X86::BI__builtin_ia32_shufps512: 4890 case X86::BI__builtin_ia32_dbpsadbw128: 4891 case X86::BI__builtin_ia32_dbpsadbw256: 4892 case X86::BI__builtin_ia32_dbpsadbw512: 4893 case X86::BI__builtin_ia32_vpshldd128: 4894 case X86::BI__builtin_ia32_vpshldd256: 4895 case X86::BI__builtin_ia32_vpshldd512: 4896 case X86::BI__builtin_ia32_vpshldq128: 4897 case X86::BI__builtin_ia32_vpshldq256: 4898 case X86::BI__builtin_ia32_vpshldq512: 4899 case X86::BI__builtin_ia32_vpshldw128: 4900 case X86::BI__builtin_ia32_vpshldw256: 4901 case X86::BI__builtin_ia32_vpshldw512: 4902 case X86::BI__builtin_ia32_vpshrdd128: 4903 case X86::BI__builtin_ia32_vpshrdd256: 4904 case X86::BI__builtin_ia32_vpshrdd512: 4905 case X86::BI__builtin_ia32_vpshrdq128: 4906 case X86::BI__builtin_ia32_vpshrdq256: 4907 case X86::BI__builtin_ia32_vpshrdq512: 4908 case X86::BI__builtin_ia32_vpshrdw128: 4909 case X86::BI__builtin_ia32_vpshrdw256: 4910 case X86::BI__builtin_ia32_vpshrdw512: 4911 i = 2; l = 0; u = 255; 4912 break; 4913 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4914 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4915 case X86::BI__builtin_ia32_fixupimmps512_mask: 4916 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4917 case X86::BI__builtin_ia32_fixupimmsd_mask: 4918 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4919 case X86::BI__builtin_ia32_fixupimmss_mask: 4920 case X86::BI__builtin_ia32_fixupimmss_maskz: 4921 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4922 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4923 case 
  X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have
  // out-of-range values. Such code still has to be able to reach code
  // generation, even if the values make no sense. We use a warning that
  // defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the
/// FormatStringInfo parameter with the FormatAttr's correct format_idx and
/// firstDataArg. Returns true when the format fits the function and the
/// FormatStringInfo has been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
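  // For example (illustrative): given
  //   int *_Nonnull q = ...;
  //   callee(q);
  // the argument is treated as non-null here, whatever q's actual value is;
  // only expressions without a _Nonnull-annotated type are evaluated below.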
4998 if (auto nullability 4999 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 5000 if (*nullability == NullabilityKind::NonNull) 5001 return false; 5002 } 5003 5004 // As a special case, transparent unions initialized with zero are 5005 // considered null for the purposes of the nonnull attribute. 5006 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5007 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5008 if (const CompoundLiteralExpr *CLE = 5009 dyn_cast<CompoundLiteralExpr>(Expr)) 5010 if (const InitListExpr *ILE = 5011 dyn_cast<InitListExpr>(CLE->getInitializer())) 5012 Expr = ILE->getInit(0); 5013 } 5014 5015 bool Result; 5016 return (!Expr->isValueDependent() && 5017 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5018 !Result); 5019 } 5020 5021 static void CheckNonNullArgument(Sema &S, 5022 const Expr *ArgExpr, 5023 SourceLocation CallSiteLoc) { 5024 if (CheckNonNullExpr(S, ArgExpr)) 5025 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5026 S.PDiag(diag::warn_null_arg) 5027 << ArgExpr->getSourceRange()); 5028 } 5029 5030 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5031 FormatStringInfo FSI; 5032 if ((GetFormatStringType(Format) == FST_NSString) && 5033 getFormatStringInfo(Format, false, &FSI)) { 5034 Idx = FSI.FormatIdx; 5035 return true; 5036 } 5037 return false; 5038 } 5039 5040 /// Diagnose use of %s directive in an NSString which is being passed 5041 /// as formatting string to formatting method. 5042 static void 5043 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 5044 const NamedDecl *FDecl, 5045 Expr **Args, 5046 unsigned NumArgs) { 5047 unsigned Idx = 0; 5048 bool Format = false; 5049 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 5050 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 5051 Idx = 2; 5052 Format = true; 5053 } 5054 else 5055 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5056 if (S.GetFormatNSStringIdx(I, Idx)) { 5057 Format = true; 5058 break; 5059 } 5060 } 5061 if (!Format || NumArgs <= Idx) 5062 return; 5063 const Expr *FormatExpr = Args[Idx]; 5064 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 5065 FormatExpr = CSCE->getSubExpr(); 5066 const StringLiteral *FormatString; 5067 if (const ObjCStringLiteral *OSL = 5068 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 5069 FormatString = OSL->getString(); 5070 else 5071 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 5072 if (!FormatString) 5073 return; 5074 if (S.FormatStringHasSArg(FormatString)) { 5075 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 5076 << "%s" << 1 << 1; 5077 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 5078 << FDecl->getDeclName(); 5079 } 5080 } 5081 5082 /// Determine whether the given type has a non-null nullability annotation. 5083 static bool isNonNullType(ASTContext &ctx, QualType type) { 5084 if (auto nullability = type->getNullability(ctx)) 5085 return *nullability == NullabilityKind::NonNull; 5086 5087 return false; 5088 } 5089 5090 static void CheckNonNullArguments(Sema &S, 5091 const NamedDecl *FDecl, 5092 const FunctionProtoType *Proto, 5093 ArrayRef<const Expr *> Args, 5094 SourceLocation CallSiteLoc) { 5095 assert((FDecl || Proto) && "Need a function declaration or prototype"); 5096 5097 // Already checked by by constant evaluator. 5098 if (S.isConstantEvaluated()) 5099 return; 5100 // Check the attributes attached to the method/function itself. 
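  // For example, a declaration like
  //   void take(void *dst, void *src) __attribute__((nonnull(1, 2)));
  // marks both arguments below, and a call such as take(buf, nullptr) is then
  // diagnosed through CheckNonNullArgument (-Wnonnull).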
5101 llvm::SmallBitVector NonNullArgs; 5102 if (FDecl) { 5103 // Handle the nonnull attribute on the function/method declaration itself. 5104 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 5105 if (!NonNull->args_size()) { 5106 // Easy case: all pointer arguments are nonnull. 5107 for (const auto *Arg : Args) 5108 if (S.isValidPointerAttrType(Arg->getType())) 5109 CheckNonNullArgument(S, Arg, CallSiteLoc); 5110 return; 5111 } 5112 5113 for (const ParamIdx &Idx : NonNull->args()) { 5114 unsigned IdxAST = Idx.getASTIndex(); 5115 if (IdxAST >= Args.size()) 5116 continue; 5117 if (NonNullArgs.empty()) 5118 NonNullArgs.resize(Args.size()); 5119 NonNullArgs.set(IdxAST); 5120 } 5121 } 5122 } 5123 5124 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 5125 // Handle the nonnull attribute on the parameters of the 5126 // function/method. 5127 ArrayRef<ParmVarDecl*> parms; 5128 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 5129 parms = FD->parameters(); 5130 else 5131 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 5132 5133 unsigned ParamIndex = 0; 5134 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 5135 I != E; ++I, ++ParamIndex) { 5136 const ParmVarDecl *PVD = *I; 5137 if (PVD->hasAttr<NonNullAttr>() || 5138 isNonNullType(S.Context, PVD->getType())) { 5139 if (NonNullArgs.empty()) 5140 NonNullArgs.resize(Args.size()); 5141 5142 NonNullArgs.set(ParamIndex); 5143 } 5144 } 5145 } else { 5146 // If we have a non-function, non-method declaration but no 5147 // function prototype, try to dig out the function prototype. 5148 if (!Proto) { 5149 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 5150 QualType type = VD->getType().getNonReferenceType(); 5151 if (auto pointerType = type->getAs<PointerType>()) 5152 type = pointerType->getPointeeType(); 5153 else if (auto blockType = type->getAs<BlockPointerType>()) 5154 type = blockType->getPointeeType(); 5155 // FIXME: data member pointers? 5156 5157 // Dig out the function prototype, if there is one. 5158 Proto = type->getAs<FunctionProtoType>(); 5159 } 5160 } 5161 5162 // Fill in non-null argument information from the nullability 5163 // information on the parameter types (if we have them). 5164 if (Proto) { 5165 unsigned Index = 0; 5166 for (auto paramType : Proto->getParamTypes()) { 5167 if (isNonNullType(S.Context, paramType)) { 5168 if (NonNullArgs.empty()) 5169 NonNullArgs.resize(Args.size()); 5170 5171 NonNullArgs.set(Index); 5172 } 5173 5174 ++Index; 5175 } 5176 } 5177 } 5178 5179 // Check for non-null arguments. 5180 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5181 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5182 if (NonNullArgs[ArgIndex]) 5183 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 5184 } 5185 } 5186 5187 /// Warn if a pointer or reference argument passed to a function points to an 5188 /// object that is less aligned than the parameter. This can happen when 5189 /// creating a typedef with a lower alignment than the original type and then 5190 /// calling functions defined in terms of the original type. 5191 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5192 StringRef ParamName, QualType ArgTy, 5193 QualType ParamTy) { 5194 5195 // If a function accepts a pointer or reference type 5196 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5197 return; 5198 5199 // If the parameter is a pointer type, get the pointee type for the 5200 // argument too. 
If the parameter is a reference type, don't try to get 5201 // the pointee type for the argument. 5202 if (ParamTy->isPointerType()) 5203 ArgTy = ArgTy->getPointeeType(); 5204 5205 // Remove reference or pointer 5206 ParamTy = ParamTy->getPointeeType(); 5207 5208 // Find expected alignment, and the actual alignment of the passed object. 5209 // getTypeAlignInChars requires complete types 5210 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5211 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5212 ArgTy->isUndeducedType()) 5213 return; 5214 5215 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5216 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5217 5218 // If the argument is less aligned than the parameter, there is a 5219 // potential alignment issue. 5220 if (ArgAlign < ParamAlign) 5221 Diag(Loc, diag::warn_param_mismatched_alignment) 5222 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5223 << ParamName << (FDecl != nullptr) << FDecl; 5224 } 5225 5226 /// Handles the checks for format strings, non-POD arguments to vararg 5227 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5228 /// attributes. 5229 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5230 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5231 bool IsMemberFunction, SourceLocation Loc, 5232 SourceRange Range, VariadicCallType CallType) { 5233 // FIXME: We should check as much as we can in the template definition. 5234 if (CurContext->isDependentContext()) 5235 return; 5236 5237 // Printf and scanf checking. 5238 llvm::SmallBitVector CheckedVarArgs; 5239 if (FDecl) { 5240 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5241 // Only create vector if there are format attributes. 5242 CheckedVarArgs.resize(Args.size()); 5243 5244 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5245 CheckedVarArgs); 5246 } 5247 } 5248 5249 // Refuse POD arguments that weren't caught by the format string 5250 // checks above. 5251 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5252 if (CallType != VariadicDoesNotApply && 5253 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5254 unsigned NumParams = Proto ? Proto->getNumParams() 5255 : FDecl && isa<FunctionDecl>(FDecl) 5256 ? cast<FunctionDecl>(FDecl)->getNumParams() 5257 : FDecl && isa<ObjCMethodDecl>(FDecl) 5258 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5259 : 0; 5260 5261 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5262 // Args[ArgIdx] can be null in malformed code. 5263 if (const Expr *Arg = Args[ArgIdx]) { 5264 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5265 checkVariadicArgument(Arg, CallType); 5266 } 5267 } 5268 } 5269 5270 if (FDecl || Proto) { 5271 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5272 5273 // Type safety checking. 5274 if (FDecl) { 5275 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5276 CheckArgumentWithTypeTag(I, Args, Loc); 5277 } 5278 } 5279 5280 // Check that passed arguments match the alignment of original arguments. 5281 // Try to get the missing prototype from the declaration. 5282 if (!Proto && FDecl) { 5283 const auto *FT = FDecl->getFunctionType(); 5284 if (isa_and_nonnull<FunctionProtoType>(FT)) 5285 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5286 } 5287 if (Proto) { 5288 // For variadic functions, we may have more args than parameters. 5289 // For some K&R functions, we may have less args than parameters. 
5290 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5291 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5292 // Args[ArgIdx] can be null in malformed code. 5293 if (const Expr *Arg = Args[ArgIdx]) { 5294 if (Arg->containsErrors()) 5295 continue; 5296 5297 QualType ParamTy = Proto->getParamType(ArgIdx); 5298 QualType ArgTy = Arg->getType(); 5299 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5300 ArgTy, ParamTy); 5301 } 5302 } 5303 } 5304 5305 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5306 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5307 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5308 if (!Arg->isValueDependent()) { 5309 Expr::EvalResult Align; 5310 if (Arg->EvaluateAsInt(Align, Context)) { 5311 const llvm::APSInt &I = Align.Val.getInt(); 5312 if (!I.isPowerOf2()) 5313 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5314 << Arg->getSourceRange(); 5315 5316 if (I > Sema::MaximumAlignment) 5317 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5318 << Arg->getSourceRange() << Sema::MaximumAlignment; 5319 } 5320 } 5321 } 5322 5323 if (FD) 5324 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5325 } 5326 5327 /// CheckConstructorCall - Check a constructor call for correctness and safety 5328 /// properties not enforced by the C type system. 5329 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5330 ArrayRef<const Expr *> Args, 5331 const FunctionProtoType *Proto, 5332 SourceLocation Loc) { 5333 VariadicCallType CallType = 5334 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5335 5336 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5337 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5338 Context.getPointerType(Ctor->getThisObjectType())); 5339 5340 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5341 Loc, SourceRange(), CallType); 5342 } 5343 5344 /// CheckFunctionCall - Check a direct function call for various correctness 5345 /// and safety properties not strictly enforced by the C type system. 5346 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5347 const FunctionProtoType *Proto) { 5348 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5349 isa<CXXMethodDecl>(FDecl); 5350 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5351 IsMemberOperatorCall; 5352 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5353 TheCall->getCallee()); 5354 Expr** Args = TheCall->getArgs(); 5355 unsigned NumArgs = TheCall->getNumArgs(); 5356 5357 Expr *ImplicitThis = nullptr; 5358 if (IsMemberOperatorCall) { 5359 // If this is a call to a member operator, hide the first argument 5360 // from checkCall. 5361 // FIXME: Our choice of AST representation here is less than ideal. 5362 ImplicitThis = Args[0]; 5363 ++Args; 5364 --NumArgs; 5365 } else if (IsMemberFunction) 5366 ImplicitThis = 5367 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5368 5369 if (ImplicitThis) { 5370 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5371 // used. 
5372 QualType ThisType = ImplicitThis->getType(); 5373 if (!ThisType->isPointerType()) { 5374 assert(!ThisType->isReferenceType()); 5375 ThisType = Context.getPointerType(ThisType); 5376 } 5377 5378 QualType ThisTypeFromDecl = 5379 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5380 5381 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5382 ThisTypeFromDecl); 5383 } 5384 5385 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5386 IsMemberFunction, TheCall->getRParenLoc(), 5387 TheCall->getCallee()->getSourceRange(), CallType); 5388 5389 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5390 // None of the checks below are needed for functions that don't have 5391 // simple names (e.g., C++ conversion functions). 5392 if (!FnInfo) 5393 return false; 5394 5395 CheckTCBEnforcement(TheCall, FDecl); 5396 5397 CheckAbsoluteValueFunction(TheCall, FDecl); 5398 CheckMaxUnsignedZero(TheCall, FDecl); 5399 5400 if (getLangOpts().ObjC) 5401 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5402 5403 unsigned CMId = FDecl->getMemoryFunctionKind(); 5404 5405 // Handle memory setting and copying functions. 5406 switch (CMId) { 5407 case 0: 5408 return false; 5409 case Builtin::BIstrlcpy: // fallthrough 5410 case Builtin::BIstrlcat: 5411 CheckStrlcpycatArguments(TheCall, FnInfo); 5412 break; 5413 case Builtin::BIstrncat: 5414 CheckStrncatArguments(TheCall, FnInfo); 5415 break; 5416 case Builtin::BIfree: 5417 CheckFreeArguments(TheCall); 5418 break; 5419 default: 5420 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5421 } 5422 5423 return false; 5424 } 5425 5426 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5427 ArrayRef<const Expr *> Args) { 5428 VariadicCallType CallType = 5429 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5430 5431 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5432 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5433 CallType); 5434 5435 return false; 5436 } 5437 5438 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5439 const FunctionProtoType *Proto) { 5440 QualType Ty; 5441 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5442 Ty = V->getType().getNonReferenceType(); 5443 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5444 Ty = F->getType().getNonReferenceType(); 5445 else 5446 return false; 5447 5448 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5449 !Ty->isFunctionProtoType()) 5450 return false; 5451 5452 VariadicCallType CallType; 5453 if (!Proto || !Proto->isVariadic()) { 5454 CallType = VariadicDoesNotApply; 5455 } else if (Ty->isBlockPointerType()) { 5456 CallType = VariadicBlock; 5457 } else { // Ty->isFunctionPointerType() 5458 CallType = VariadicFunction; 5459 } 5460 5461 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5462 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5463 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5464 TheCall->getCallee()->getSourceRange(), CallType); 5465 5466 return false; 5467 } 5468 5469 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5470 /// such as function pointers returned from functions. 
5471 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5472 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5473 TheCall->getCallee()); 5474 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5475 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5476 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5477 TheCall->getCallee()->getSourceRange(), CallType); 5478 5479 return false; 5480 } 5481 5482 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5483 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5484 return false; 5485 5486 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5487 switch (Op) { 5488 case AtomicExpr::AO__c11_atomic_init: 5489 case AtomicExpr::AO__opencl_atomic_init: 5490 llvm_unreachable("There is no ordering argument for an init"); 5491 5492 case AtomicExpr::AO__c11_atomic_load: 5493 case AtomicExpr::AO__opencl_atomic_load: 5494 case AtomicExpr::AO__hip_atomic_load: 5495 case AtomicExpr::AO__atomic_load_n: 5496 case AtomicExpr::AO__atomic_load: 5497 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5498 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5499 5500 case AtomicExpr::AO__c11_atomic_store: 5501 case AtomicExpr::AO__opencl_atomic_store: 5502 case AtomicExpr::AO__hip_atomic_store: 5503 case AtomicExpr::AO__atomic_store: 5504 case AtomicExpr::AO__atomic_store_n: 5505 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5506 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5507 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5508 5509 default: 5510 return true; 5511 } 5512 } 5513 5514 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5515 AtomicExpr::AtomicOp Op) { 5516 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5517 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5518 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5519 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5520 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5521 Op); 5522 } 5523 5524 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5525 SourceLocation RParenLoc, MultiExprArg Args, 5526 AtomicExpr::AtomicOp Op, 5527 AtomicArgumentOrder ArgOrder) { 5528 // All the non-OpenCL operations take one of the following forms. 5529 // The OpenCL operations take the __c11 forms with one extra argument for 5530 // synchronization scope. 
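  // A few concrete calls and the forms they classify as (illustrative only):
  //
  //   __c11_atomic_init(&a, 0);                         // Init
  //   int v = __c11_atomic_load(&a, __ATOMIC_ACQUIRE);  // Load
  //   __atomic_load(&i, &out, __ATOMIC_ACQUIRE);        // LoadCopy
  //   __c11_atomic_fetch_add(&a, 1, __ATOMIC_SEQ_CST);  // Arithmetic
  //   __atomic_compare_exchange(&i, &expected, &desired, /*weak=*/false,
  //                             __ATOMIC_SEQ_CST, __ATOMIC_RELAXED); // GNUCmpXchg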
5531 enum { 5532 // C __c11_atomic_init(A *, C) 5533 Init, 5534 5535 // C __c11_atomic_load(A *, int) 5536 Load, 5537 5538 // void __atomic_load(A *, CP, int) 5539 LoadCopy, 5540 5541 // void __atomic_store(A *, CP, int) 5542 Copy, 5543 5544 // C __c11_atomic_add(A *, M, int) 5545 Arithmetic, 5546 5547 // C __atomic_exchange_n(A *, CP, int) 5548 Xchg, 5549 5550 // void __atomic_exchange(A *, C *, CP, int) 5551 GNUXchg, 5552 5553 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5554 C11CmpXchg, 5555 5556 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5557 GNUCmpXchg 5558 } Form = Init; 5559 5560 const unsigned NumForm = GNUCmpXchg + 1; 5561 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5562 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5563 // where: 5564 // C is an appropriate type, 5565 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5566 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5567 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5568 // the int parameters are for orderings. 5569 5570 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5571 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5572 "need to update code for modified forms"); 5573 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5574 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5575 AtomicExpr::AO__atomic_load, 5576 "need to update code for modified C11 atomics"); 5577 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5578 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5579 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 5580 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 5581 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5582 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5583 IsOpenCL; 5584 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5585 Op == AtomicExpr::AO__atomic_store_n || 5586 Op == AtomicExpr::AO__atomic_exchange_n || 5587 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5588 bool IsAddSub = false; 5589 5590 switch (Op) { 5591 case AtomicExpr::AO__c11_atomic_init: 5592 case AtomicExpr::AO__opencl_atomic_init: 5593 Form = Init; 5594 break; 5595 5596 case AtomicExpr::AO__c11_atomic_load: 5597 case AtomicExpr::AO__opencl_atomic_load: 5598 case AtomicExpr::AO__hip_atomic_load: 5599 case AtomicExpr::AO__atomic_load_n: 5600 Form = Load; 5601 break; 5602 5603 case AtomicExpr::AO__atomic_load: 5604 Form = LoadCopy; 5605 break; 5606 5607 case AtomicExpr::AO__c11_atomic_store: 5608 case AtomicExpr::AO__opencl_atomic_store: 5609 case AtomicExpr::AO__hip_atomic_store: 5610 case AtomicExpr::AO__atomic_store: 5611 case AtomicExpr::AO__atomic_store_n: 5612 Form = Copy; 5613 break; 5614 case AtomicExpr::AO__hip_atomic_fetch_add: 5615 case AtomicExpr::AO__hip_atomic_fetch_min: 5616 case AtomicExpr::AO__hip_atomic_fetch_max: 5617 case AtomicExpr::AO__c11_atomic_fetch_add: 5618 case AtomicExpr::AO__c11_atomic_fetch_sub: 5619 case AtomicExpr::AO__opencl_atomic_fetch_add: 5620 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5621 case AtomicExpr::AO__atomic_fetch_add: 5622 case AtomicExpr::AO__atomic_fetch_sub: 5623 case AtomicExpr::AO__atomic_add_fetch: 5624 case AtomicExpr::AO__atomic_sub_fetch: 5625 IsAddSub = true; 5626 Form = Arithmetic; 5627 break; 5628 case AtomicExpr::AO__c11_atomic_fetch_and: 5629 case AtomicExpr::AO__c11_atomic_fetch_or: 5630 case AtomicExpr::AO__c11_atomic_fetch_xor: 5631 case AtomicExpr::AO__hip_atomic_fetch_and: 5632 case 
AtomicExpr::AO__hip_atomic_fetch_or: 5633 case AtomicExpr::AO__hip_atomic_fetch_xor: 5634 case AtomicExpr::AO__c11_atomic_fetch_nand: 5635 case AtomicExpr::AO__opencl_atomic_fetch_and: 5636 case AtomicExpr::AO__opencl_atomic_fetch_or: 5637 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5638 case AtomicExpr::AO__atomic_fetch_and: 5639 case AtomicExpr::AO__atomic_fetch_or: 5640 case AtomicExpr::AO__atomic_fetch_xor: 5641 case AtomicExpr::AO__atomic_fetch_nand: 5642 case AtomicExpr::AO__atomic_and_fetch: 5643 case AtomicExpr::AO__atomic_or_fetch: 5644 case AtomicExpr::AO__atomic_xor_fetch: 5645 case AtomicExpr::AO__atomic_nand_fetch: 5646 Form = Arithmetic; 5647 break; 5648 case AtomicExpr::AO__c11_atomic_fetch_min: 5649 case AtomicExpr::AO__c11_atomic_fetch_max: 5650 case AtomicExpr::AO__opencl_atomic_fetch_min: 5651 case AtomicExpr::AO__opencl_atomic_fetch_max: 5652 case AtomicExpr::AO__atomic_min_fetch: 5653 case AtomicExpr::AO__atomic_max_fetch: 5654 case AtomicExpr::AO__atomic_fetch_min: 5655 case AtomicExpr::AO__atomic_fetch_max: 5656 Form = Arithmetic; 5657 break; 5658 5659 case AtomicExpr::AO__c11_atomic_exchange: 5660 case AtomicExpr::AO__hip_atomic_exchange: 5661 case AtomicExpr::AO__opencl_atomic_exchange: 5662 case AtomicExpr::AO__atomic_exchange_n: 5663 Form = Xchg; 5664 break; 5665 5666 case AtomicExpr::AO__atomic_exchange: 5667 Form = GNUXchg; 5668 break; 5669 5670 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5671 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5672 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 5673 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5674 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5675 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 5676 Form = C11CmpXchg; 5677 break; 5678 5679 case AtomicExpr::AO__atomic_compare_exchange: 5680 case AtomicExpr::AO__atomic_compare_exchange_n: 5681 Form = GNUCmpXchg; 5682 break; 5683 } 5684 5685 unsigned AdjustedNumArgs = NumArgs[Form]; 5686 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 5687 ++AdjustedNumArgs; 5688 // Check we have the right number of arguments. 5689 if (Args.size() < AdjustedNumArgs) { 5690 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5691 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5692 << ExprRange; 5693 return ExprError(); 5694 } else if (Args.size() > AdjustedNumArgs) { 5695 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5696 diag::err_typecheck_call_too_many_args) 5697 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5698 << ExprRange; 5699 return ExprError(); 5700 } 5701 5702 // Inspect the first argument of the atomic operation. 5703 Expr *Ptr = Args[0]; 5704 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5705 if (ConvertedPtr.isInvalid()) 5706 return ExprError(); 5707 5708 Ptr = ConvertedPtr.get(); 5709 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5710 if (!pointerType) { 5711 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5712 << Ptr->getType() << Ptr->getSourceRange(); 5713 return ExprError(); 5714 } 5715 5716 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
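  // For example (illustrative):
  //   _Atomic(int) *p;  __c11_atomic_fetch_add(p, 1, __ATOMIC_RELAXED);  // OK
  //   int *q;           __c11_atomic_fetch_add(q, 1, __ATOMIC_RELAXED);  // error
  // The GNU __atomic_* builtins, in contrast, operate on the plain
  // (non-_Atomic) type.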
5717 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5718 QualType ValType = AtomTy; // 'C' 5719 if (IsC11) { 5720 if (!AtomTy->isAtomicType()) { 5721 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5722 << Ptr->getType() << Ptr->getSourceRange(); 5723 return ExprError(); 5724 } 5725 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5726 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5727 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5728 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 5729 << Ptr->getSourceRange(); 5730 return ExprError(); 5731 } 5732 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5733 } else if (Form != Load && Form != LoadCopy) { 5734 if (ValType.isConstQualified()) { 5735 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5736 << Ptr->getType() << Ptr->getSourceRange(); 5737 return ExprError(); 5738 } 5739 } 5740 5741 // For an arithmetic operation, the implied arithmetic must be well-formed. 5742 if (Form == Arithmetic) { 5743 // GCC does not enforce these rules for GNU atomics, but we do to help catch 5744 // trivial type errors. 5745 auto IsAllowedValueType = [&](QualType ValType) { 5746 if (ValType->isIntegerType()) 5747 return true; 5748 if (ValType->isPointerType()) 5749 return true; 5750 if (!ValType->isFloatingType()) 5751 return false; 5752 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5753 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5754 &Context.getTargetInfo().getLongDoubleFormat() == 5755 &llvm::APFloat::x87DoubleExtended()) 5756 return false; 5757 return true; 5758 }; 5759 if (IsAddSub && !IsAllowedValueType(ValType)) { 5760 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5761 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5762 return ExprError(); 5763 } 5764 if (!IsAddSub && !ValType->isIntegerType()) { 5765 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5766 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5767 return ExprError(); 5768 } 5769 if (IsC11 && ValType->isPointerType() && 5770 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5771 diag::err_incomplete_type)) { 5772 return ExprError(); 5773 } 5774 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5775 // For __atomic_*_n operations, the value type must be a scalar integral or 5776 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5777 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5778 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5779 return ExprError(); 5780 } 5781 5782 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5783 !AtomTy->isScalarType()) { 5784 // For GNU atomics, require a trivially-copyable type. This is not part of 5785 // the GNU atomics specification but we enforce it for consistency with 5786 // other atomics which generally all require a trivially-copyable type. This 5787 // is because atomics just copy bits. 5788 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5789 << Ptr->getType() << Ptr->getSourceRange(); 5790 return ExprError(); 5791 } 5792 5793 switch (ValType.getObjCLifetime()) { 5794 case Qualifiers::OCL_None: 5795 case Qualifiers::OCL_ExplicitNone: 5796 // okay 5797 break; 5798 5799 case Qualifiers::OCL_Weak: 5800 case Qualifiers::OCL_Strong: 5801 case Qualifiers::OCL_Autoreleasing: 5802 // FIXME: Can this happen? 
By this point, ValType should be known 5803 // to be trivially copyable. 5804 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5805 << ValType << Ptr->getSourceRange(); 5806 return ExprError(); 5807 } 5808 5809 // All atomic operations have an overload which takes a pointer to a volatile 5810 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5811 // into the result or the other operands. Similarly atomic_load takes a 5812 // pointer to a const 'A'. 5813 ValType.removeLocalVolatile(); 5814 ValType.removeLocalConst(); 5815 QualType ResultType = ValType; 5816 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5817 Form == Init) 5818 ResultType = Context.VoidTy; 5819 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5820 ResultType = Context.BoolTy; 5821 5822 // The type of a parameter passed 'by value'. In the GNU atomics, such 5823 // arguments are actually passed as pointers. 5824 QualType ByValType = ValType; // 'CP' 5825 bool IsPassedByAddress = false; 5826 if (!IsC11 && !IsHIP && !IsN) { 5827 ByValType = Ptr->getType(); 5828 IsPassedByAddress = true; 5829 } 5830 5831 SmallVector<Expr *, 5> APIOrderedArgs; 5832 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5833 APIOrderedArgs.push_back(Args[0]); 5834 switch (Form) { 5835 case Init: 5836 case Load: 5837 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5838 break; 5839 case LoadCopy: 5840 case Copy: 5841 case Arithmetic: 5842 case Xchg: 5843 APIOrderedArgs.push_back(Args[2]); // Val1 5844 APIOrderedArgs.push_back(Args[1]); // Order 5845 break; 5846 case GNUXchg: 5847 APIOrderedArgs.push_back(Args[2]); // Val1 5848 APIOrderedArgs.push_back(Args[3]); // Val2 5849 APIOrderedArgs.push_back(Args[1]); // Order 5850 break; 5851 case C11CmpXchg: 5852 APIOrderedArgs.push_back(Args[2]); // Val1 5853 APIOrderedArgs.push_back(Args[4]); // Val2 5854 APIOrderedArgs.push_back(Args[1]); // Order 5855 APIOrderedArgs.push_back(Args[3]); // OrderFail 5856 break; 5857 case GNUCmpXchg: 5858 APIOrderedArgs.push_back(Args[2]); // Val1 5859 APIOrderedArgs.push_back(Args[4]); // Val2 5860 APIOrderedArgs.push_back(Args[5]); // Weak 5861 APIOrderedArgs.push_back(Args[1]); // Order 5862 APIOrderedArgs.push_back(Args[3]); // OrderFail 5863 break; 5864 } 5865 } else 5866 APIOrderedArgs.append(Args.begin(), Args.end()); 5867 5868 // The first argument's non-CV pointer type is used to deduce the type of 5869 // subsequent arguments, except for: 5870 // - weak flag (always converted to bool) 5871 // - memory order (always converted to int) 5872 // - scope (always converted to int) 5873 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5874 QualType Ty; 5875 if (i < NumVals[Form] + 1) { 5876 switch (i) { 5877 case 0: 5878 // The first argument is always a pointer. It has a fixed type. 5879 // It is always dereferenced, a nullptr is undefined. 5880 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5881 // Nothing else to do: we already know all we want about this pointer. 5882 continue; 5883 case 1: 5884 // The second argument is the non-atomic operand. For arithmetic, this 5885 // is always passed by value, and for a compare_exchange it is always 5886 // passed by address. For the rest, GNU uses by-address and C11 uses 5887 // by-value. 
5888 assert(Form != Load); 5889 if (Form == Arithmetic && ValType->isPointerType()) 5890 Ty = Context.getPointerDiffType(); 5891 else if (Form == Init || Form == Arithmetic) 5892 Ty = ValType; 5893 else if (Form == Copy || Form == Xchg) { 5894 if (IsPassedByAddress) { 5895 // The value pointer is always dereferenced, a nullptr is undefined. 5896 CheckNonNullArgument(*this, APIOrderedArgs[i], 5897 ExprRange.getBegin()); 5898 } 5899 Ty = ByValType; 5900 } else { 5901 Expr *ValArg = APIOrderedArgs[i]; 5902 // The value pointer is always dereferenced, a nullptr is undefined. 5903 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5904 LangAS AS = LangAS::Default; 5905 // Keep address space of non-atomic pointer type. 5906 if (const PointerType *PtrTy = 5907 ValArg->getType()->getAs<PointerType>()) { 5908 AS = PtrTy->getPointeeType().getAddressSpace(); 5909 } 5910 Ty = Context.getPointerType( 5911 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5912 } 5913 break; 5914 case 2: 5915 // The third argument to compare_exchange / GNU exchange is the desired 5916 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5917 if (IsPassedByAddress) 5918 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5919 Ty = ByValType; 5920 break; 5921 case 3: 5922 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5923 Ty = Context.BoolTy; 5924 break; 5925 } 5926 } else { 5927 // The order(s) and scope are always converted to int. 5928 Ty = Context.IntTy; 5929 } 5930 5931 InitializedEntity Entity = 5932 InitializedEntity::InitializeParameter(Context, Ty, false); 5933 ExprResult Arg = APIOrderedArgs[i]; 5934 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5935 if (Arg.isInvalid()) 5936 return true; 5937 APIOrderedArgs[i] = Arg.get(); 5938 } 5939 5940 // Permute the arguments into a 'consistent' order. 5941 SmallVector<Expr*, 5> SubExprs; 5942 SubExprs.push_back(Ptr); 5943 switch (Form) { 5944 case Init: 5945 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5946 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5947 break; 5948 case Load: 5949 SubExprs.push_back(APIOrderedArgs[1]); // Order 5950 break; 5951 case LoadCopy: 5952 case Copy: 5953 case Arithmetic: 5954 case Xchg: 5955 SubExprs.push_back(APIOrderedArgs[2]); // Order 5956 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5957 break; 5958 case GNUXchg: 5959 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
5960 SubExprs.push_back(APIOrderedArgs[3]); // Order 5961 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5962 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5963 break; 5964 case C11CmpXchg: 5965 SubExprs.push_back(APIOrderedArgs[3]); // Order 5966 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5967 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5968 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5969 break; 5970 case GNUCmpXchg: 5971 SubExprs.push_back(APIOrderedArgs[4]); // Order 5972 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5973 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5974 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5975 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5976 break; 5977 } 5978 5979 if (SubExprs.size() >= 2 && Form != Init) { 5980 if (Optional<llvm::APSInt> Result = 5981 SubExprs[1]->getIntegerConstantExpr(Context)) 5982 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5983 Diag(SubExprs[1]->getBeginLoc(), 5984 diag::warn_atomic_op_has_invalid_memory_order) 5985 << SubExprs[1]->getSourceRange(); 5986 } 5987 5988 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5989 auto *Scope = Args[Args.size() - 1]; 5990 if (Optional<llvm::APSInt> Result = 5991 Scope->getIntegerConstantExpr(Context)) { 5992 if (!ScopeModel->isValid(Result->getZExtValue())) 5993 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5994 << Scope->getSourceRange(); 5995 } 5996 SubExprs.push_back(Scope); 5997 } 5998 5999 AtomicExpr *AE = new (Context) 6000 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6001 6002 if ((Op == AtomicExpr::AO__c11_atomic_load || 6003 Op == AtomicExpr::AO__c11_atomic_store || 6004 Op == AtomicExpr::AO__opencl_atomic_load || 6005 Op == AtomicExpr::AO__hip_atomic_load || 6006 Op == AtomicExpr::AO__opencl_atomic_store || 6007 Op == AtomicExpr::AO__hip_atomic_store) && 6008 Context.AtomicUsesUnsupportedLibcall(AE)) 6009 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6010 << ((Op == AtomicExpr::AO__c11_atomic_load || 6011 Op == AtomicExpr::AO__opencl_atomic_load || 6012 Op == AtomicExpr::AO__hip_atomic_load) 6013 ? 0 6014 : 1); 6015 6016 if (ValType->isBitIntType()) { 6017 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6018 return ExprError(); 6019 } 6020 6021 return AE; 6022 } 6023 6024 /// checkBuiltinArgument - Given a call to a builtin function, perform 6025 /// normal type-checking on the given argument, updating the call in 6026 /// place. This is useful when a builtin function requires custom 6027 /// type-checking for some of its arguments but not necessarily all of 6028 /// them. 6029 /// 6030 /// Returns true on error. 6031 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6032 FunctionDecl *Fn = E->getDirectCallee(); 6033 assert(Fn && "builtin call without direct callee!"); 6034 6035 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6036 InitializedEntity Entity = 6037 InitializedEntity::InitializeParameter(S.Context, Param); 6038 6039 ExprResult Arg = E->getArg(0); 6040 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6041 if (Arg.isInvalid()) 6042 return true; 6043 6044 E->setArg(ArgIndex, Arg.get()); 6045 return false; 6046 } 6047 6048 /// We have a call to a function like __sync_fetch_and_add, which is an 6049 /// overloaded function based on the pointer type of its first argument. 
6050 /// The main BuildCallExpr routines have already promoted the types of 6051 /// arguments because all of these calls are prototyped as void(...). 6052 /// 6053 /// This function goes through and does final semantic checking for these 6054 /// builtins, as well as generating any warnings. 6055 ExprResult 6056 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6057 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6058 Expr *Callee = TheCall->getCallee(); 6059 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6060 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6061 6062 // Ensure that we have at least one argument to do type inference from. 6063 if (TheCall->getNumArgs() < 1) { 6064 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6065 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6066 return ExprError(); 6067 } 6068 6069 // Inspect the first argument of the atomic builtin. This should always be 6070 // a pointer type, whose element is an integral scalar or pointer type. 6071 // Because it is a pointer type, we don't have to worry about any implicit 6072 // casts here. 6073 // FIXME: We don't allow floating point scalars as input. 6074 Expr *FirstArg = TheCall->getArg(0); 6075 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6076 if (FirstArgResult.isInvalid()) 6077 return ExprError(); 6078 FirstArg = FirstArgResult.get(); 6079 TheCall->setArg(0, FirstArg); 6080 6081 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6082 if (!pointerType) { 6083 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6084 << FirstArg->getType() << FirstArg->getSourceRange(); 6085 return ExprError(); 6086 } 6087 6088 QualType ValType = pointerType->getPointeeType(); 6089 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6090 !ValType->isBlockPointerType()) { 6091 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6092 << FirstArg->getType() << FirstArg->getSourceRange(); 6093 return ExprError(); 6094 } 6095 6096 if (ValType.isConstQualified()) { 6097 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6098 << FirstArg->getType() << FirstArg->getSourceRange(); 6099 return ExprError(); 6100 } 6101 6102 switch (ValType.getObjCLifetime()) { 6103 case Qualifiers::OCL_None: 6104 case Qualifiers::OCL_ExplicitNone: 6105 // okay 6106 break; 6107 6108 case Qualifiers::OCL_Weak: 6109 case Qualifiers::OCL_Strong: 6110 case Qualifiers::OCL_Autoreleasing: 6111 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6112 << ValType << FirstArg->getSourceRange(); 6113 return ExprError(); 6114 } 6115 6116 // Strip any qualifiers off ValType. 6117 ValType = ValType.getUnqualifiedType(); 6118 6119 // The majority of builtins return a value, but a few have special return 6120 // types, so allow them to override appropriately below. 6121 QualType ResultType = ValType; 6122 6123 // We need to figure out which concrete builtin this maps onto. For example, 6124 // __sync_fetch_and_add with a 2 byte object turns into 6125 // __sync_fetch_and_add_2. 
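// Illustrative sketch (not part of the builtin tables below): given
//   short Counter;
//   __sync_fetch_and_add(&Counter, 1);
// ValType is 'short' (2 bytes), so the call is rebound to the concrete
// __sync_fetch_and_add_2 declaration selected from BuiltinIndices, and the
// call's result type becomes 'short'.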
6126 #define BUILTIN_ROW(x) \ 6127 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6128 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6129 6130 static const unsigned BuiltinIndices[][5] = { 6131 BUILTIN_ROW(__sync_fetch_and_add), 6132 BUILTIN_ROW(__sync_fetch_and_sub), 6133 BUILTIN_ROW(__sync_fetch_and_or), 6134 BUILTIN_ROW(__sync_fetch_and_and), 6135 BUILTIN_ROW(__sync_fetch_and_xor), 6136 BUILTIN_ROW(__sync_fetch_and_nand), 6137 6138 BUILTIN_ROW(__sync_add_and_fetch), 6139 BUILTIN_ROW(__sync_sub_and_fetch), 6140 BUILTIN_ROW(__sync_and_and_fetch), 6141 BUILTIN_ROW(__sync_or_and_fetch), 6142 BUILTIN_ROW(__sync_xor_and_fetch), 6143 BUILTIN_ROW(__sync_nand_and_fetch), 6144 6145 BUILTIN_ROW(__sync_val_compare_and_swap), 6146 BUILTIN_ROW(__sync_bool_compare_and_swap), 6147 BUILTIN_ROW(__sync_lock_test_and_set), 6148 BUILTIN_ROW(__sync_lock_release), 6149 BUILTIN_ROW(__sync_swap) 6150 }; 6151 #undef BUILTIN_ROW 6152 6153 // Determine the index of the size. 6154 unsigned SizeIndex; 6155 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6156 case 1: SizeIndex = 0; break; 6157 case 2: SizeIndex = 1; break; 6158 case 4: SizeIndex = 2; break; 6159 case 8: SizeIndex = 3; break; 6160 case 16: SizeIndex = 4; break; 6161 default: 6162 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6163 << FirstArg->getType() << FirstArg->getSourceRange(); 6164 return ExprError(); 6165 } 6166 6167 // Each of these builtins has one pointer argument, followed by some number of 6168 // values (0, 1 or 2) followed by a potentially empty varargs list of stuff 6169 // that we ignore. Find out which row of BuiltinIndices to read from as well 6170 // as the number of fixed args. 6171 unsigned BuiltinID = FDecl->getBuiltinID(); 6172 unsigned BuiltinIndex, NumFixed = 1; 6173 bool WarnAboutSemanticsChange = false; 6174 switch (BuiltinID) { 6175 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6176 case Builtin::BI__sync_fetch_and_add: 6177 case Builtin::BI__sync_fetch_and_add_1: 6178 case Builtin::BI__sync_fetch_and_add_2: 6179 case Builtin::BI__sync_fetch_and_add_4: 6180 case Builtin::BI__sync_fetch_and_add_8: 6181 case Builtin::BI__sync_fetch_and_add_16: 6182 BuiltinIndex = 0; 6183 break; 6184 6185 case Builtin::BI__sync_fetch_and_sub: 6186 case Builtin::BI__sync_fetch_and_sub_1: 6187 case Builtin::BI__sync_fetch_and_sub_2: 6188 case Builtin::BI__sync_fetch_and_sub_4: 6189 case Builtin::BI__sync_fetch_and_sub_8: 6190 case Builtin::BI__sync_fetch_and_sub_16: 6191 BuiltinIndex = 1; 6192 break; 6193 6194 case Builtin::BI__sync_fetch_and_or: 6195 case Builtin::BI__sync_fetch_and_or_1: 6196 case Builtin::BI__sync_fetch_and_or_2: 6197 case Builtin::BI__sync_fetch_and_or_4: 6198 case Builtin::BI__sync_fetch_and_or_8: 6199 case Builtin::BI__sync_fetch_and_or_16: 6200 BuiltinIndex = 2; 6201 break; 6202 6203 case Builtin::BI__sync_fetch_and_and: 6204 case Builtin::BI__sync_fetch_and_and_1: 6205 case Builtin::BI__sync_fetch_and_and_2: 6206 case Builtin::BI__sync_fetch_and_and_4: 6207 case Builtin::BI__sync_fetch_and_and_8: 6208 case Builtin::BI__sync_fetch_and_and_16: 6209 BuiltinIndex = 3; 6210 break; 6211 6212 case Builtin::BI__sync_fetch_and_xor: 6213 case Builtin::BI__sync_fetch_and_xor_1: 6214 case Builtin::BI__sync_fetch_and_xor_2: 6215 case Builtin::BI__sync_fetch_and_xor_4: 6216 case Builtin::BI__sync_fetch_and_xor_8: 6217 case Builtin::BI__sync_fetch_and_xor_16: 6218 BuiltinIndex = 4; 6219 break; 6220 6221 case Builtin::BI__sync_fetch_and_nand: 6222 case
Builtin::BI__sync_fetch_and_nand_1: 6223 case Builtin::BI__sync_fetch_and_nand_2: 6224 case Builtin::BI__sync_fetch_and_nand_4: 6225 case Builtin::BI__sync_fetch_and_nand_8: 6226 case Builtin::BI__sync_fetch_and_nand_16: 6227 BuiltinIndex = 5; 6228 WarnAboutSemanticsChange = true; 6229 break; 6230 6231 case Builtin::BI__sync_add_and_fetch: 6232 case Builtin::BI__sync_add_and_fetch_1: 6233 case Builtin::BI__sync_add_and_fetch_2: 6234 case Builtin::BI__sync_add_and_fetch_4: 6235 case Builtin::BI__sync_add_and_fetch_8: 6236 case Builtin::BI__sync_add_and_fetch_16: 6237 BuiltinIndex = 6; 6238 break; 6239 6240 case Builtin::BI__sync_sub_and_fetch: 6241 case Builtin::BI__sync_sub_and_fetch_1: 6242 case Builtin::BI__sync_sub_and_fetch_2: 6243 case Builtin::BI__sync_sub_and_fetch_4: 6244 case Builtin::BI__sync_sub_and_fetch_8: 6245 case Builtin::BI__sync_sub_and_fetch_16: 6246 BuiltinIndex = 7; 6247 break; 6248 6249 case Builtin::BI__sync_and_and_fetch: 6250 case Builtin::BI__sync_and_and_fetch_1: 6251 case Builtin::BI__sync_and_and_fetch_2: 6252 case Builtin::BI__sync_and_and_fetch_4: 6253 case Builtin::BI__sync_and_and_fetch_8: 6254 case Builtin::BI__sync_and_and_fetch_16: 6255 BuiltinIndex = 8; 6256 break; 6257 6258 case Builtin::BI__sync_or_and_fetch: 6259 case Builtin::BI__sync_or_and_fetch_1: 6260 case Builtin::BI__sync_or_and_fetch_2: 6261 case Builtin::BI__sync_or_and_fetch_4: 6262 case Builtin::BI__sync_or_and_fetch_8: 6263 case Builtin::BI__sync_or_and_fetch_16: 6264 BuiltinIndex = 9; 6265 break; 6266 6267 case Builtin::BI__sync_xor_and_fetch: 6268 case Builtin::BI__sync_xor_and_fetch_1: 6269 case Builtin::BI__sync_xor_and_fetch_2: 6270 case Builtin::BI__sync_xor_and_fetch_4: 6271 case Builtin::BI__sync_xor_and_fetch_8: 6272 case Builtin::BI__sync_xor_and_fetch_16: 6273 BuiltinIndex = 10; 6274 break; 6275 6276 case Builtin::BI__sync_nand_and_fetch: 6277 case Builtin::BI__sync_nand_and_fetch_1: 6278 case Builtin::BI__sync_nand_and_fetch_2: 6279 case Builtin::BI__sync_nand_and_fetch_4: 6280 case Builtin::BI__sync_nand_and_fetch_8: 6281 case Builtin::BI__sync_nand_and_fetch_16: 6282 BuiltinIndex = 11; 6283 WarnAboutSemanticsChange = true; 6284 break; 6285 6286 case Builtin::BI__sync_val_compare_and_swap: 6287 case Builtin::BI__sync_val_compare_and_swap_1: 6288 case Builtin::BI__sync_val_compare_and_swap_2: 6289 case Builtin::BI__sync_val_compare_and_swap_4: 6290 case Builtin::BI__sync_val_compare_and_swap_8: 6291 case Builtin::BI__sync_val_compare_and_swap_16: 6292 BuiltinIndex = 12; 6293 NumFixed = 2; 6294 break; 6295 6296 case Builtin::BI__sync_bool_compare_and_swap: 6297 case Builtin::BI__sync_bool_compare_and_swap_1: 6298 case Builtin::BI__sync_bool_compare_and_swap_2: 6299 case Builtin::BI__sync_bool_compare_and_swap_4: 6300 case Builtin::BI__sync_bool_compare_and_swap_8: 6301 case Builtin::BI__sync_bool_compare_and_swap_16: 6302 BuiltinIndex = 13; 6303 NumFixed = 2; 6304 ResultType = Context.BoolTy; 6305 break; 6306 6307 case Builtin::BI__sync_lock_test_and_set: 6308 case Builtin::BI__sync_lock_test_and_set_1: 6309 case Builtin::BI__sync_lock_test_and_set_2: 6310 case Builtin::BI__sync_lock_test_and_set_4: 6311 case Builtin::BI__sync_lock_test_and_set_8: 6312 case Builtin::BI__sync_lock_test_and_set_16: 6313 BuiltinIndex = 14; 6314 break; 6315 6316 case Builtin::BI__sync_lock_release: 6317 case Builtin::BI__sync_lock_release_1: 6318 case Builtin::BI__sync_lock_release_2: 6319 case Builtin::BI__sync_lock_release_4: 6320 case Builtin::BI__sync_lock_release_8: 6321 case 
Builtin::BI__sync_lock_release_16: 6322 BuiltinIndex = 15; 6323 NumFixed = 0; 6324 ResultType = Context.VoidTy; 6325 break; 6326 6327 case Builtin::BI__sync_swap: 6328 case Builtin::BI__sync_swap_1: 6329 case Builtin::BI__sync_swap_2: 6330 case Builtin::BI__sync_swap_4: 6331 case Builtin::BI__sync_swap_8: 6332 case Builtin::BI__sync_swap_16: 6333 BuiltinIndex = 16; 6334 break; 6335 } 6336 6337 // Now that we know how many fixed arguments we expect, first check that we 6338 // have at least that many. 6339 if (TheCall->getNumArgs() < 1+NumFixed) { 6340 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6341 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6342 << Callee->getSourceRange(); 6343 return ExprError(); 6344 } 6345 6346 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6347 << Callee->getSourceRange(); 6348 6349 if (WarnAboutSemanticsChange) { 6350 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6351 << Callee->getSourceRange(); 6352 } 6353 6354 // Get the decl for the concrete builtin; from this, we can tell what the 6355 // concrete integer type we should convert to is. 6356 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6357 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6358 FunctionDecl *NewBuiltinDecl; 6359 if (NewBuiltinID == BuiltinID) 6360 NewBuiltinDecl = FDecl; 6361 else { 6362 // Perform builtin lookup to avoid redeclaring it. 6363 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6364 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6365 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6366 assert(Res.getFoundDecl()); 6367 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6368 if (!NewBuiltinDecl) 6369 return ExprError(); 6370 } 6371 6372 // The first argument --- the pointer --- has a fixed type; we 6373 // deduce the types of the rest of the arguments accordingly. Walk 6374 // the remaining arguments, converting them to the deduced value type. 6375 for (unsigned i = 0; i != NumFixed; ++i) { 6376 ExprResult Arg = TheCall->getArg(i+1); 6377 6378 // GCC does an implicit conversion to the pointer or integer ValType. This 6379 // can fail in some cases (1i -> int**); check for this error case now. 6380 // Initialize the argument. 6381 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6382 ValType, /*consume*/ false); 6383 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6384 if (Arg.isInvalid()) 6385 return ExprError(); 6386 6387 // Okay, we have something that *can* be converted to the right type. Check 6388 // to see if there is a potentially weird extension going on here. This can 6389 // happen when you do an atomic operation on something like a char* and 6390 // pass in 42. The 42 gets converted to char. This is even more strange 6391 // for things like 45.123 -> char, etc. 6392 // FIXME: Do this check. 6393 TheCall->setArg(i+1, Arg.get()); 6394 } 6395 6396 // Create a new DeclRefExpr to refer to the new decl. 6397 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6398 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6399 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6400 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6401 6402 // Set the callee in the CallExpr. 6403 // FIXME: This loses syntactic information.
6404 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6405 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6406 CK_BuiltinFnToFnPtr); 6407 TheCall->setCallee(PromotedCall.get()); 6408 6409 // Change the result type of the call to match the original value type. This 6410 // is arbitrary, but the codegen for these builtins is designed to handle it 6411 // gracefully. 6412 TheCall->setType(ResultType); 6413 6414 // Prohibit problematic uses of bit-precise integer types with atomic 6415 // builtins. The arguments would have already been converted to the first 6416 // argument's type, so we only need to check the first argument. 6417 const auto *BitIntValType = ValType->getAs<BitIntType>(); 6418 if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) { 6419 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6420 return ExprError(); 6421 } 6422 6423 return TheCallResult; 6424 } 6425 6426 /// SemaBuiltinNontemporalOverloaded - We have a call to 6427 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6428 /// overloaded function based on the pointer type of its last argument. 6429 /// 6430 /// This function goes through and does final semantic checking for these 6431 /// builtins. 6432 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6433 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6434 DeclRefExpr *DRE = 6435 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6436 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6437 unsigned BuiltinID = FDecl->getBuiltinID(); 6438 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6439 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6440 "Unexpected nontemporal load/store builtin!"); 6441 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6442 unsigned numArgs = isStore ? 2 : 1; 6443 6444 // Ensure that we have the proper number of arguments. 6445 if (checkArgCount(*this, TheCall, numArgs)) 6446 return ExprError(); 6447 6448 // Inspect the last argument of the nontemporal builtin. This should always 6449 // be a pointer type, from which we infer the type of the memory access. 6450 // Because it is a pointer type, we don't have to worry about any implicit 6451 // casts here. 6452 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6453 ExprResult PointerArgResult = 6454 DefaultFunctionArrayLvalueConversion(PointerArg); 6455 6456 if (PointerArgResult.isInvalid()) 6457 return ExprError(); 6458 PointerArg = PointerArgResult.get(); 6459 TheCall->setArg(numArgs - 1, PointerArg); 6460 6461 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6462 if (!pointerType) { 6463 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6464 << PointerArg->getType() << PointerArg->getSourceRange(); 6465 return ExprError(); 6466 } 6467 6468 QualType ValType = pointerType->getPointeeType(); 6469 6470 // Strip any qualifiers off ValType.
6471 ValType = ValType.getUnqualifiedType(); 6472 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6473 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6474 !ValType->isVectorType()) { 6475 Diag(DRE->getBeginLoc(), 6476 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6477 << PointerArg->getType() << PointerArg->getSourceRange(); 6478 return ExprError(); 6479 } 6480 6481 if (!isStore) { 6482 TheCall->setType(ValType); 6483 return TheCallResult; 6484 } 6485 6486 ExprResult ValArg = TheCall->getArg(0); 6487 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6488 Context, ValType, /*consume*/ false); 6489 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6490 if (ValArg.isInvalid()) 6491 return ExprError(); 6492 6493 TheCall->setArg(0, ValArg.get()); 6494 TheCall->setType(Context.VoidTy); 6495 return TheCallResult; 6496 } 6497 6498 /// CheckObjCString - Checks that the argument to the builtin 6499 /// CFString constructor is correct. 6500 /// Note: It might also make sense to do the UTF-16 conversion here (would 6501 /// simplify the backend). 6502 bool Sema::CheckObjCString(Expr *Arg) { 6503 Arg = Arg->IgnoreParenCasts(); 6504 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6505 6506 if (!Literal || !Literal->isAscii()) { 6507 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6508 << Arg->getSourceRange(); 6509 return true; 6510 } 6511 6512 if (Literal->containsNonAsciiOrNull()) { 6513 StringRef String = Literal->getString(); 6514 unsigned NumBytes = String.size(); 6515 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6516 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6517 llvm::UTF16 *ToPtr = &ToBuf[0]; 6518 6519 llvm::ConversionResult Result = 6520 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6521 ToPtr + NumBytes, llvm::strictConversion); 6522 // Check for conversion failure. 6523 if (Result != llvm::conversionOK) 6524 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6525 << Arg->getSourceRange(); 6526 } 6527 return false; 6528 } 6529 6530 /// CheckOSLogFormatStringArg - Checks that the format string argument to the 6531 /// os_log() and os_trace() functions is correct, and converts it to const char *. 6532 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6533 Arg = Arg->IgnoreParenCasts(); 6534 auto *Literal = dyn_cast<StringLiteral>(Arg); 6535 if (!Literal) { 6536 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6537 Literal = ObjcLiteral->getString(); 6538 } 6539 } 6540 6541 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6542 return ExprError( 6543 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6544 << Arg->getSourceRange()); 6545 } 6546 6547 ExprResult Result(Literal); 6548 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6549 InitializedEntity Entity = 6550 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6551 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6552 return Result; 6553 } 6554 6555 /// Check that the user is calling the appropriate va_start builtin for the 6556 /// target and calling convention.
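/// For example (illustrative), in an ordinary System V x86-64 function,
/// __builtin_va_start is accepted while __builtin_ms_va_start is diagnosed;
/// in a __attribute__((ms_abi)) function the situation is reversed.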
6557 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6558 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6559 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6560 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6561 TT.getArch() == llvm::Triple::aarch64_32); 6562 bool IsWindows = TT.isOSWindows(); 6563 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6564 if (IsX64 || IsAArch64) { 6565 CallingConv CC = CC_C; 6566 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 6567 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 6568 if (IsMSVAStart) { 6569 // Don't allow this in System V ABI functions. 6570 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 6571 return S.Diag(Fn->getBeginLoc(), 6572 diag::err_ms_va_start_used_in_sysv_function); 6573 } else { 6574 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 6575 // On x64 Windows, don't allow this in System V ABI functions. 6576 // (Yes, that means there's no corresponding way to support variadic 6577 // System V ABI functions on Windows.) 6578 if ((IsWindows && CC == CC_X86_64SysV) || 6579 (!IsWindows && CC == CC_Win64)) 6580 return S.Diag(Fn->getBeginLoc(), 6581 diag::err_va_start_used_in_wrong_abi_function) 6582 << !IsWindows; 6583 } 6584 return false; 6585 } 6586 6587 if (IsMSVAStart) 6588 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 6589 return false; 6590 } 6591 6592 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 6593 ParmVarDecl **LastParam = nullptr) { 6594 // Determine whether the current function, block, or obj-c method is variadic 6595 // and get its parameter list. 6596 bool IsVariadic = false; 6597 ArrayRef<ParmVarDecl *> Params; 6598 DeclContext *Caller = S.CurContext; 6599 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 6600 IsVariadic = Block->isVariadic(); 6601 Params = Block->parameters(); 6602 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 6603 IsVariadic = FD->isVariadic(); 6604 Params = FD->parameters(); 6605 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 6606 IsVariadic = MD->isVariadic(); 6607 // FIXME: This isn't correct for methods (results in bogus warning). 6608 Params = MD->parameters(); 6609 } else if (isa<CapturedDecl>(Caller)) { 6610 // We don't support va_start in a CapturedDecl. 6611 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 6612 return true; 6613 } else { 6614 // This must be some other declcontext that parses exprs. 6615 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 6616 return true; 6617 } 6618 6619 if (!IsVariadic) { 6620 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 6621 return true; 6622 } 6623 6624 if (LastParam) 6625 *LastParam = Params.empty() ? nullptr : Params.back(); 6626 6627 return false; 6628 } 6629 6630 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 6631 /// for validity. Emit an error and return true on failure; return false 6632 /// on success. 6633 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 6634 Expr *Fn = TheCall->getCallee(); 6635 6636 if (checkVAStartABI(*this, BuiltinID, Fn)) 6637 return true; 6638 6639 if (checkArgCount(*this, TheCall, 2)) 6640 return true; 6641 6642 // Type-check the first argument normally. 6643 if (checkBuiltinArgument(*this, TheCall, 0)) 6644 return true; 6645 6646 // Check that the current function is variadic, and get its last parameter. 
6647 ParmVarDecl *LastParam; 6648 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 6649 return true; 6650 6651 // Verify that the second argument to the builtin is the last argument of the 6652 // current function or method. 6653 bool SecondArgIsLastNamedArgument = false; 6654 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 6655 6656 // These are valid if SecondArgIsLastNamedArgument is false after the next 6657 // block. 6658 QualType Type; 6659 SourceLocation ParamLoc; 6660 bool IsCRegister = false; 6661 6662 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6663 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6664 SecondArgIsLastNamedArgument = PV == LastParam; 6665 6666 Type = PV->getType(); 6667 ParamLoc = PV->getLocation(); 6668 IsCRegister = 6669 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6670 } 6671 } 6672 6673 if (!SecondArgIsLastNamedArgument) 6674 Diag(TheCall->getArg(1)->getBeginLoc(), 6675 diag::warn_second_arg_of_va_start_not_last_named_param); 6676 else if (IsCRegister || Type->isReferenceType() || 6677 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6678 // Promotable integers are UB, but enumerations need a bit of 6679 // extra checking to see what their promotable type actually is. 6680 if (!Type->isPromotableIntegerType()) 6681 return false; 6682 if (!Type->isEnumeralType()) 6683 return true; 6684 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6685 return !(ED && 6686 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6687 }()) { 6688 unsigned Reason = 0; 6689 if (Type->isReferenceType()) Reason = 1; 6690 else if (IsCRegister) Reason = 2; 6691 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6692 Diag(ParamLoc, diag::note_parameter_type) << Type; 6693 } 6694 6695 TheCall->setType(Context.VoidTy); 6696 return false; 6697 } 6698 6699 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6700 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 6701 const LangOptions &LO = getLangOpts(); 6702 6703 if (LO.CPlusPlus) 6704 return Arg->getType() 6705 .getCanonicalType() 6706 .getTypePtr() 6707 ->getPointeeType() 6708 .withoutLocalFastQualifiers() == Context.CharTy; 6709 6710 // In C, allow aliasing through `char *`, this is required for AArch64 at 6711 // least. 6712 return true; 6713 }; 6714 6715 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6716 // const char *named_addr); 6717 6718 Expr *Func = Call->getCallee(); 6719 6720 if (Call->getNumArgs() < 3) 6721 return Diag(Call->getEndLoc(), 6722 diag::err_typecheck_call_too_few_args_at_least) 6723 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6724 6725 // Type-check the first argument normally. 6726 if (checkBuiltinArgument(*this, Call, 0)) 6727 return true; 6728 6729 // Check that the current function is variadic. 
6730 if (checkVAStartIsInVariadicFunction(*this, Func)) 6731 return true; 6732 6733 // __va_start on Windows does not validate the parameter qualifiers. 6734 6735 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 6736 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 6737 6738 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 6739 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 6740 6741 const QualType &ConstCharPtrTy = 6742 Context.getPointerType(Context.CharTy.withConst()); 6743 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 6744 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6745 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 6746 << 0 /* qualifier difference */ 6747 << 3 /* parameter mismatch */ 6748 << 2 << Arg1->getType() << ConstCharPtrTy; 6749 6750 const QualType SizeTy = Context.getSizeType(); 6751 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 6752 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6753 << Arg2->getType() << SizeTy << 1 /* different class */ 6754 << 0 /* qualifier difference */ 6755 << 3 /* parameter mismatch */ 6756 << 3 << Arg2->getType() << SizeTy; 6757 6758 return false; 6759 } 6760 6761 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 6762 /// friends. This is declared to take (...), so we have to check everything. 6763 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 6764 if (checkArgCount(*this, TheCall, 2)) 6765 return true; 6766 6767 ExprResult OrigArg0 = TheCall->getArg(0); 6768 ExprResult OrigArg1 = TheCall->getArg(1); 6769 6770 // Do standard promotions between the two arguments, returning their common 6771 // type. 6772 QualType Res = UsualArithmeticConversions( 6773 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 6774 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 6775 return true; 6776 6777 // Make sure any conversions are pushed back into the call; this is 6778 // type safe since unordered compare builtins are declared as "_Bool 6779 // foo(...)". 6780 TheCall->setArg(0, OrigArg0.get()); 6781 TheCall->setArg(1, OrigArg1.get()); 6782 6783 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 6784 return false; 6785 6786 // If the common type isn't a real floating type, then the arguments were 6787 // invalid for this operation. 6788 if (Res.isNull() || !Res->isRealFloatingType()) 6789 return Diag(OrigArg0.get()->getBeginLoc(), 6790 diag::err_typecheck_call_invalid_ordered_compare) 6791 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 6792 << SourceRange(OrigArg0.get()->getBeginLoc(), 6793 OrigArg1.get()->getEndLoc()); 6794 6795 return false; 6796 } 6797 6798 /// SemaBuiltinFPClassification - Handle functions like 6799 /// __builtin_isnan and friends. This is declared to take (...), so we have 6800 /// to check everything. We expect the last argument to be a floating point 6801 /// value. 6802 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 6803 if (checkArgCount(*this, TheCall, NumArgs)) 6804 return true; 6805 6806 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 6807 // on all preceding parameters just being int. Try all of those.
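// Illustrative: a call such as
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, x)
// reaches here with NumArgs == 6; the five classification arguments are
// converted to int here, and the final argument is checked below to be a
// real floating-point value.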
6808 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6809 Expr *Arg = TheCall->getArg(i); 6810 6811 if (Arg->isTypeDependent()) 6812 return false; 6813 6814 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6815 6816 if (Res.isInvalid()) 6817 return true; 6818 TheCall->setArg(i, Res.get()); 6819 } 6820 6821 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6822 6823 if (OrigArg->isTypeDependent()) 6824 return false; 6825 6826 // Usual Unary Conversions will convert half to float, which we want for 6827 // machines that use fp16 conversion intrinsics. Else, we want to leave the 6828 // type as it is, but do normal L->Rvalue conversions. 6829 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6830 OrigArg = UsualUnaryConversions(OrigArg).get(); 6831 else 6832 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6833 TheCall->setArg(NumArgs - 1, OrigArg); 6834 6835 // This operation requires a non-_Complex floating-point number. 6836 if (!OrigArg->getType()->isRealFloatingType()) 6837 return Diag(OrigArg->getBeginLoc(), 6838 diag::err_typecheck_call_invalid_unary_fp) 6839 << OrigArg->getType() << OrigArg->getSourceRange(); 6840 6841 return false; 6842 } 6843 6844 /// Perform semantic analysis for a call to __builtin_complex. 6845 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6846 if (checkArgCount(*this, TheCall, 2)) 6847 return true; 6848 6849 bool Dependent = false; 6850 for (unsigned I = 0; I != 2; ++I) { 6851 Expr *Arg = TheCall->getArg(I); 6852 QualType T = Arg->getType(); 6853 if (T->isDependentType()) { 6854 Dependent = true; 6855 continue; 6856 } 6857 6858 // Despite supporting _Complex int, GCC requires a real floating point type 6859 // for the operands of __builtin_complex. 6860 if (!T->isRealFloatingType()) { 6861 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6862 << Arg->getType() << Arg->getSourceRange(); 6863 } 6864 6865 ExprResult Converted = DefaultLvalueConversion(Arg); 6866 if (Converted.isInvalid()) 6867 return true; 6868 TheCall->setArg(I, Converted.get()); 6869 } 6870 6871 if (Dependent) { 6872 TheCall->setType(Context.DependentTy); 6873 return false; 6874 } 6875 6876 Expr *Real = TheCall->getArg(0); 6877 Expr *Imag = TheCall->getArg(1); 6878 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6879 return Diag(Real->getBeginLoc(), 6880 diag::err_typecheck_call_different_arg_types) 6881 << Real->getType() << Imag->getType() 6882 << Real->getSourceRange() << Imag->getSourceRange(); 6883 } 6884 6885 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6886 // don't allow this builtin to form those types either. 6887 // FIXME: Should we allow these types? 6888 if (Real->getType()->isFloat16Type()) 6889 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6890 << "_Float16"; 6891 if (Real->getType()->isHalfType()) 6892 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6893 << "half"; 6894 6895 TheCall->setType(Context.getComplexType(Real->getType())); 6896 return false; 6897 } 6898 6899 // Customized Sema Checking for VSX builtins that have the following signature: 6900 // vector [...] builtinName(vector [...], vector [...], const int); 6901 // These builtins take the same type of vectors (any legal vector type) for the 6902 // first two arguments and a compile-time constant for the third argument.
6903 // Example builtins are : 6904 // vector double vec_xxpermdi(vector double, vector double, int); 6905 // vector short vec_xxsldwi(vector short, vector short, int); 6906 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6907 unsigned ExpectedNumArgs = 3; 6908 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6909 return true; 6910 6911 // Check the third argument is a compile time constant 6912 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6913 return Diag(TheCall->getBeginLoc(), 6914 diag::err_vsx_builtin_nonconstant_argument) 6915 << 3 /* argument index */ << TheCall->getDirectCallee() 6916 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6917 TheCall->getArg(2)->getEndLoc()); 6918 6919 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6920 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6921 6922 // Check the type of argument 1 and argument 2 are vectors. 6923 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6924 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6925 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6926 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6927 << TheCall->getDirectCallee() 6928 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6929 TheCall->getArg(1)->getEndLoc()); 6930 } 6931 6932 // Check the first two arguments are the same type. 6933 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6934 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6935 << TheCall->getDirectCallee() 6936 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6937 TheCall->getArg(1)->getEndLoc()); 6938 } 6939 6940 // When default clang type checking is turned off and the customized type 6941 // checking is used, the returning type of the function must be explicitly 6942 // set. Otherwise it is _Bool by default. 6943 TheCall->setType(Arg1Ty); 6944 6945 return false; 6946 } 6947 6948 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6949 // This is declared to take (...), so we have to check everything. 6950 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6951 if (TheCall->getNumArgs() < 2) 6952 return ExprError(Diag(TheCall->getEndLoc(), 6953 diag::err_typecheck_call_too_few_args_at_least) 6954 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6955 << TheCall->getSourceRange()); 6956 6957 // Determine which of the following types of shufflevector we're checking: 6958 // 1) unary, vector mask: (lhs, mask) 6959 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6960 QualType resType = TheCall->getArg(0)->getType(); 6961 unsigned numElements = 0; 6962 6963 if (!TheCall->getArg(0)->isTypeDependent() && 6964 !TheCall->getArg(1)->isTypeDependent()) { 6965 QualType LHSType = TheCall->getArg(0)->getType(); 6966 QualType RHSType = TheCall->getArg(1)->getType(); 6967 6968 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6969 return ExprError( 6970 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6971 << TheCall->getDirectCallee() 6972 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6973 TheCall->getArg(1)->getEndLoc())); 6974 6975 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6976 unsigned numResElements = TheCall->getNumArgs() - 2; 6977 6978 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6979 // with mask. If so, verify that RHS is an integer vector type with the 6980 // same number of elts as lhs. 
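// Illustrative: with 'typedef int v4 __attribute__((vector_size(16)))',
//   __builtin_shufflevector(a, mask)          // unary form, vector mask
//   __builtin_shufflevector(a, b, 3, 2, 1, 0) // binary form, constant indices
// are both accepted; in the binary form each index must be a constant in
// [0, 2 * numElements) or -1 (which becomes undef).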
6981 if (TheCall->getNumArgs() == 2) { 6982 if (!RHSType->hasIntegerRepresentation() || 6983 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6984 return ExprError(Diag(TheCall->getBeginLoc(), 6985 diag::err_vec_builtin_incompatible_vector) 6986 << TheCall->getDirectCallee() 6987 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6988 TheCall->getArg(1)->getEndLoc())); 6989 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6990 return ExprError(Diag(TheCall->getBeginLoc(), 6991 diag::err_vec_builtin_incompatible_vector) 6992 << TheCall->getDirectCallee() 6993 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6994 TheCall->getArg(1)->getEndLoc())); 6995 } else if (numElements != numResElements) { 6996 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6997 resType = Context.getVectorType(eltType, numResElements, 6998 VectorType::GenericVector); 6999 } 7000 } 7001 7002 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7003 if (TheCall->getArg(i)->isTypeDependent() || 7004 TheCall->getArg(i)->isValueDependent()) 7005 continue; 7006 7007 Optional<llvm::APSInt> Result; 7008 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7009 return ExprError(Diag(TheCall->getBeginLoc(), 7010 diag::err_shufflevector_nonconstant_argument) 7011 << TheCall->getArg(i)->getSourceRange()); 7012 7013 // Allow -1 which will be translated to undef in the IR. 7014 if (Result->isSigned() && Result->isAllOnes()) 7015 continue; 7016 7017 if (Result->getActiveBits() > 64 || 7018 Result->getZExtValue() >= numElements * 2) 7019 return ExprError(Diag(TheCall->getBeginLoc(), 7020 diag::err_shufflevector_argument_too_large) 7021 << TheCall->getArg(i)->getSourceRange()); 7022 } 7023 7024 SmallVector<Expr*, 32> exprs; 7025 7026 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7027 exprs.push_back(TheCall->getArg(i)); 7028 TheCall->setArg(i, nullptr); 7029 } 7030 7031 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7032 TheCall->getCallee()->getBeginLoc(), 7033 TheCall->getRParenLoc()); 7034 } 7035 7036 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7037 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7038 SourceLocation BuiltinLoc, 7039 SourceLocation RParenLoc) { 7040 ExprValueKind VK = VK_PRValue; 7041 ExprObjectKind OK = OK_Ordinary; 7042 QualType DstTy = TInfo->getType(); 7043 QualType SrcTy = E->getType(); 7044 7045 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7046 return ExprError(Diag(BuiltinLoc, 7047 diag::err_convertvector_non_vector) 7048 << E->getSourceRange()); 7049 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7050 return ExprError(Diag(BuiltinLoc, 7051 diag::err_convertvector_non_vector_type)); 7052 7053 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7054 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7055 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7056 if (SrcElts != DstElts) 7057 return ExprError(Diag(BuiltinLoc, 7058 diag::err_convertvector_incompatible_vector) 7059 << E->getSourceRange()); 7060 } 7061 7062 return new (Context) 7063 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7064 } 7065 7066 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7067 // This is declared to take (const void*, ...) and can take two 7068 // optional constant int args. 
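// For example (illustrative): __builtin_prefetch(p), __builtin_prefetch(p, 1),
// and __builtin_prefetch(p, 0, 3) are all accepted; the optional second
// argument must be a constant in [0, 1] and the third in [0, 3].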
7069 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7070 unsigned NumArgs = TheCall->getNumArgs(); 7071 7072 if (NumArgs > 3) 7073 return Diag(TheCall->getEndLoc(), 7074 diag::err_typecheck_call_too_many_args_at_most) 7075 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7076 7077 // Argument 0 is checked for us and the remaining arguments must be 7078 // constant integers. 7079 for (unsigned i = 1; i != NumArgs; ++i) 7080 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7081 return true; 7082 7083 return false; 7084 } 7085 7086 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7087 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7088 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7089 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7090 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7091 if (checkArgCount(*this, TheCall, 1)) 7092 return true; 7093 Expr *Arg = TheCall->getArg(0); 7094 if (Arg->isInstantiationDependent()) 7095 return false; 7096 7097 QualType ArgTy = Arg->getType(); 7098 if (!ArgTy->hasFloatingRepresentation()) 7099 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7100 << ArgTy; 7101 if (Arg->isLValue()) { 7102 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7103 TheCall->setArg(0, FirstArg.get()); 7104 } 7105 TheCall->setType(TheCall->getArg(0)->getType()); 7106 return false; 7107 } 7108 7109 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7110 // __assume does not evaluate its arguments, and should warn if its argument 7111 // has side effects. 7112 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7113 Expr *Arg = TheCall->getArg(0); 7114 if (Arg->isInstantiationDependent()) return false; 7115 7116 if (Arg->HasSideEffects(Context)) 7117 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7118 << Arg->getSourceRange() 7119 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7120 7121 return false; 7122 } 7123 7124 /// Handle __builtin_alloca_with_align. This is declared 7125 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7126 /// than 8. 7127 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7128 // The alignment must be a constant integer. 7129 Expr *Arg = TheCall->getArg(1); 7130 7131 // We can't check the value of a dependent argument. 7132 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7133 if (const auto *UE = 7134 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7135 if (UE->getKind() == UETT_AlignOf || 7136 UE->getKind() == UETT_PreferredAlignOf) 7137 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7138 << Arg->getSourceRange(); 7139 7140 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7141 7142 if (!Result.isPowerOf2()) 7143 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7144 << Arg->getSourceRange(); 7145 7146 if (Result < Context.getCharWidth()) 7147 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7148 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7149 7150 if (Result > std::numeric_limits<int32_t>::max()) 7151 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7152 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7153 } 7154 7155 return false; 7156 } 7157 7158 /// Handle __builtin_assume_aligned. This is declared 7159 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
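/// For example (illustrative):
///   void *q = __builtin_assume_aligned(p, 64);         // accepted
///   void *r = __builtin_assume_aligned(p, 64, offset); // accepted, with offset
///   void *s = __builtin_assume_aligned(p, 48);         // rejected: not a power of 2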
7160 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7161 unsigned NumArgs = TheCall->getNumArgs(); 7162 7163 if (NumArgs > 3) 7164 return Diag(TheCall->getEndLoc(), 7165 diag::err_typecheck_call_too_many_args_at_most) 7166 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7167 7168 // The alignment must be a constant integer. 7169 Expr *Arg = TheCall->getArg(1); 7170 7171 // We can't check the value of a dependent argument. 7172 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7173 llvm::APSInt Result; 7174 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7175 return true; 7176 7177 if (!Result.isPowerOf2()) 7178 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7179 << Arg->getSourceRange(); 7180 7181 if (Result > Sema::MaximumAlignment) 7182 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7183 << Arg->getSourceRange() << Sema::MaximumAlignment; 7184 } 7185 7186 if (NumArgs > 2) { 7187 ExprResult Arg(TheCall->getArg(2)); 7188 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7189 Context.getSizeType(), false); 7190 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7191 if (Arg.isInvalid()) return true; 7192 TheCall->setArg(2, Arg.get()); 7193 } 7194 7195 return false; 7196 } 7197 7198 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7199 unsigned BuiltinID = 7200 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7201 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7202 7203 unsigned NumArgs = TheCall->getNumArgs(); 7204 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7205 if (NumArgs < NumRequiredArgs) { 7206 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7207 << 0 /* function call */ << NumRequiredArgs << NumArgs 7208 << TheCall->getSourceRange(); 7209 } 7210 if (NumArgs >= NumRequiredArgs + 0x100) { 7211 return Diag(TheCall->getEndLoc(), 7212 diag::err_typecheck_call_too_many_args_at_most) 7213 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7214 << TheCall->getSourceRange(); 7215 } 7216 unsigned i = 0; 7217 7218 // For formatting call, check buffer arg. 7219 if (!IsSizeCall) { 7220 ExprResult Arg(TheCall->getArg(i)); 7221 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7222 Context, Context.VoidPtrTy, false); 7223 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7224 if (Arg.isInvalid()) 7225 return true; 7226 TheCall->setArg(i, Arg.get()); 7227 i++; 7228 } 7229 7230 // Check string literal arg. 7231 unsigned FormatIdx = i; 7232 { 7233 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7234 if (Arg.isInvalid()) 7235 return true; 7236 TheCall->setArg(i, Arg.get()); 7237 i++; 7238 } 7239 7240 // Make sure variadic args are scalar. 7241 unsigned FirstDataArg = i; 7242 while (i < NumArgs) { 7243 ExprResult Arg = DefaultVariadicArgumentPromotion( 7244 TheCall->getArg(i), VariadicFunction, nullptr); 7245 if (Arg.isInvalid()) 7246 return true; 7247 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7248 if (ArgSize.getQuantity() >= 0x100) { 7249 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7250 << i << (int)ArgSize.getQuantity() << 0xff 7251 << TheCall->getSourceRange(); 7252 } 7253 TheCall->setArg(i, Arg.get()); 7254 i++; 7255 } 7256 7257 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7258 // call to avoid duplicate diagnostics. 
7259 if (!IsSizeCall) { 7260 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 7261 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 7262 bool Success = CheckFormatArguments( 7263 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 7264 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 7265 CheckedVarArgs); 7266 if (!Success) 7267 return true; 7268 } 7269 7270 if (IsSizeCall) { 7271 TheCall->setType(Context.getSizeType()); 7272 } else { 7273 TheCall->setType(Context.VoidPtrTy); 7274 } 7275 return false; 7276 } 7277 7278 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 7279 /// TheCall is a constant expression. 7280 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 7281 llvm::APSInt &Result) { 7282 Expr *Arg = TheCall->getArg(ArgNum); 7283 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7284 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7285 7286 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 7287 7288 Optional<llvm::APSInt> R; 7289 if (!(R = Arg->getIntegerConstantExpr(Context))) 7290 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 7291 << FDecl->getDeclName() << Arg->getSourceRange(); 7292 Result = *R; 7293 return false; 7294 } 7295 7296 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 7297 /// TheCall is a constant expression in the range [Low, High]. 7298 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 7299 int Low, int High, bool RangeIsError) { 7300 if (isConstantEvaluated()) 7301 return false; 7302 llvm::APSInt Result; 7303 7304 // We can't check the value of a dependent argument. 7305 Expr *Arg = TheCall->getArg(ArgNum); 7306 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7307 return false; 7308 7309 // Check constant-ness first. 7310 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7311 return true; 7312 7313 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 7314 if (RangeIsError) 7315 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 7316 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 7317 else 7318 // Defer the warning until we know if the code will be emitted so that 7319 // dead code can ignore this. 7320 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 7321 PDiag(diag::warn_argument_invalid_range) 7322 << toString(Result, 10) << Low << High 7323 << Arg->getSourceRange()); 7324 } 7325 7326 return false; 7327 } 7328 7329 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 7330 /// TheCall is a constant expression that is a multiple of Num. 7331 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 7332 unsigned Num) { 7333 llvm::APSInt Result; 7334 7335 // We can't check the value of a dependent argument. 7336 Expr *Arg = TheCall->getArg(ArgNum); 7337 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7338 return false; 7339 7340 // Check constant-ness first. 7341 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7342 return true; 7343 7344 if (Result.getSExtValue() % Num != 0) 7345 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 7346 << Num << Arg->getSourceRange(); 7347 7348 return false; 7349 } 7350 7351 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 7352 /// constant expression representing a power of 2.
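/// For example (illustrative), immediates of 1, 2, or 4096 are accepted,
/// while 0, 3, and negative values are rejected with
/// err_argument_not_power_of_2.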
7353 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7354 llvm::APSInt Result; 7355 7356 // We can't check the value of a dependent argument. 7357 Expr *Arg = TheCall->getArg(ArgNum); 7358 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7359 return false; 7360 7361 // Check constant-ness first. 7362 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7363 return true; 7364 7365 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7366 // and only if x is a power of 2. 7367 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7368 return false; 7369 7370 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7371 << Arg->getSourceRange(); 7372 } 7373 7374 static bool IsShiftedByte(llvm::APSInt Value) { 7375 if (Value.isNegative()) 7376 return false; 7377 7378 // Check if it's a shifted byte, by shifting it down 7379 while (true) { 7380 // If the value fits in the bottom byte, the check passes. 7381 if (Value < 0x100) 7382 return true; 7383 7384 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7385 // fails. 7386 if ((Value & 0xFF) != 0) 7387 return false; 7388 7389 // If the bottom 8 bits are all 0, but something above that is nonzero, 7390 // then shifting the value right by 8 bits won't affect whether it's a 7391 // shifted byte or not. So do that, and go round again. 7392 Value >>= 8; 7393 } 7394 } 7395 7396 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7397 /// a constant expression representing an arbitrary byte value shifted left by 7398 /// a multiple of 8 bits. 7399 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7400 unsigned ArgBits) { 7401 llvm::APSInt Result; 7402 7403 // We can't check the value of a dependent argument. 7404 Expr *Arg = TheCall->getArg(ArgNum); 7405 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7406 return false; 7407 7408 // Check constant-ness first. 7409 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7410 return true; 7411 7412 // Truncate to the given size. 7413 Result = Result.getLoBits(ArgBits); 7414 Result.setIsUnsigned(true); 7415 7416 if (IsShiftedByte(Result)) 7417 return false; 7418 7419 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7420 << Arg->getSourceRange(); 7421 } 7422 7423 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7424 /// TheCall is a constant expression representing either a shifted byte value, 7425 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7426 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7427 /// Arm MVE intrinsics. 7428 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7429 int ArgNum, 7430 unsigned ArgBits) { 7431 llvm::APSInt Result; 7432 7433 // We can't check the value of a dependent argument. 7434 Expr *Arg = TheCall->getArg(ArgNum); 7435 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7436 return false; 7437 7438 // Check constant-ness first. 7439 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7440 return true; 7441 7442 // Truncate to the given size. 7443 Result = Result.getLoBits(ArgBits); 7444 Result.setIsUnsigned(true); 7445 7446 // Check to see if it's in either of the required forms. 
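// Illustrative: 0x00AB and 0x1200 qualify as shifted bytes, 0x03FF matches
// the 0x??FF form, while 0x1234 is rejected.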
7447 if (IsShiftedByte(Result) || 7448 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7449 return false; 7450 7451 return Diag(TheCall->getBeginLoc(), 7452 diag::err_argument_not_shifted_byte_or_xxff) 7453 << Arg->getSourceRange(); 7454 } 7455 7456 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7457 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7458 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7459 if (checkArgCount(*this, TheCall, 2)) 7460 return true; 7461 Expr *Arg0 = TheCall->getArg(0); 7462 Expr *Arg1 = TheCall->getArg(1); 7463 7464 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7465 if (FirstArg.isInvalid()) 7466 return true; 7467 QualType FirstArgType = FirstArg.get()->getType(); 7468 if (!FirstArgType->isAnyPointerType()) 7469 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7470 << "first" << FirstArgType << Arg0->getSourceRange(); 7471 TheCall->setArg(0, FirstArg.get()); 7472 7473 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7474 if (SecArg.isInvalid()) 7475 return true; 7476 QualType SecArgType = SecArg.get()->getType(); 7477 if (!SecArgType->isIntegerType()) 7478 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7479 << "second" << SecArgType << Arg1->getSourceRange(); 7480 7481 // Derive the return type from the pointer argument. 7482 TheCall->setType(FirstArgType); 7483 return false; 7484 } 7485 7486 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7487 if (checkArgCount(*this, TheCall, 2)) 7488 return true; 7489 7490 Expr *Arg0 = TheCall->getArg(0); 7491 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7492 if (FirstArg.isInvalid()) 7493 return true; 7494 QualType FirstArgType = FirstArg.get()->getType(); 7495 if (!FirstArgType->isAnyPointerType()) 7496 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7497 << "first" << FirstArgType << Arg0->getSourceRange(); 7498 TheCall->setArg(0, FirstArg.get()); 7499 7500 // Derive the return type from the pointer argument. 
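// __builtin_arm_addg yields a pointer derived from its first operand, so give
// the call that operand's type; the tag offset operand is validated below as an
// immediate in [0, 15].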
7501 TheCall->setType(FirstArgType); 7502 7503 // Second arg must be an constant in range [0,15] 7504 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7505 } 7506 7507 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7508 if (checkArgCount(*this, TheCall, 2)) 7509 return true; 7510 Expr *Arg0 = TheCall->getArg(0); 7511 Expr *Arg1 = TheCall->getArg(1); 7512 7513 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7514 if (FirstArg.isInvalid()) 7515 return true; 7516 QualType FirstArgType = FirstArg.get()->getType(); 7517 if (!FirstArgType->isAnyPointerType()) 7518 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7519 << "first" << FirstArgType << Arg0->getSourceRange(); 7520 7521 QualType SecArgType = Arg1->getType(); 7522 if (!SecArgType->isIntegerType()) 7523 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7524 << "second" << SecArgType << Arg1->getSourceRange(); 7525 TheCall->setType(Context.IntTy); 7526 return false; 7527 } 7528 7529 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7530 BuiltinID == AArch64::BI__builtin_arm_stg) { 7531 if (checkArgCount(*this, TheCall, 1)) 7532 return true; 7533 Expr *Arg0 = TheCall->getArg(0); 7534 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7535 if (FirstArg.isInvalid()) 7536 return true; 7537 7538 QualType FirstArgType = FirstArg.get()->getType(); 7539 if (!FirstArgType->isAnyPointerType()) 7540 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7541 << "first" << FirstArgType << Arg0->getSourceRange(); 7542 TheCall->setArg(0, FirstArg.get()); 7543 7544 // Derive the return type from the pointer argument. 7545 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 7546 TheCall->setType(FirstArgType); 7547 return false; 7548 } 7549 7550 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 7551 Expr *ArgA = TheCall->getArg(0); 7552 Expr *ArgB = TheCall->getArg(1); 7553 7554 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 7555 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 7556 7557 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 7558 return true; 7559 7560 QualType ArgTypeA = ArgExprA.get()->getType(); 7561 QualType ArgTypeB = ArgExprB.get()->getType(); 7562 7563 auto isNull = [&] (Expr *E) -> bool { 7564 return E->isNullPointerConstant( 7565 Context, Expr::NPC_ValueDependentIsNotNull); }; 7566 7567 // argument should be either a pointer or null 7568 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 7569 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7570 << "first" << ArgTypeA << ArgA->getSourceRange(); 7571 7572 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 7573 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7574 << "second" << ArgTypeB << ArgB->getSourceRange(); 7575 7576 // Ensure Pointee types are compatible 7577 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 7578 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 7579 QualType pointeeA = ArgTypeA->getPointeeType(); 7580 QualType pointeeB = ArgTypeB->getPointeeType(); 7581 if (!Context.typesAreCompatible( 7582 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 7583 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 7584 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 7585 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 7586 << ArgB->getSourceRange(); 7587 } 7588 } 7589 7590 // at least one argument should be pointer type 7591 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 7592 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 7593 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 7594 7595 if (isNull(ArgA)) // adopt type of the other pointer 7596 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 7597 7598 if (isNull(ArgB)) 7599 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 7600 7601 TheCall->setArg(0, ArgExprA.get()); 7602 TheCall->setArg(1, ArgExprB.get()); 7603 TheCall->setType(Context.LongLongTy); 7604 return false; 7605 } 7606 assert(false && "Unhandled ARM MTE intrinsic"); 7607 return true; 7608 } 7609 7610 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 7611 /// TheCall is an ARM/AArch64 special register string literal. 7612 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 7613 int ArgNum, unsigned ExpectedFieldNum, 7614 bool AllowName) { 7615 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 7616 BuiltinID == ARM::BI__builtin_arm_wsr64 || 7617 BuiltinID == ARM::BI__builtin_arm_rsr || 7618 BuiltinID == ARM::BI__builtin_arm_rsrp || 7619 BuiltinID == ARM::BI__builtin_arm_wsr || 7620 BuiltinID == ARM::BI__builtin_arm_wsrp; 7621 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 7622 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 7623 BuiltinID == AArch64::BI__builtin_arm_rsr || 7624 BuiltinID == AArch64::BI__builtin_arm_rsrp || 7625 BuiltinID == AArch64::BI__builtin_arm_wsr || 7626 BuiltinID == AArch64::BI__builtin_arm_wsrp; 7627 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 7628 7629 // We can't check the value of a dependent argument. 7630 Expr *Arg = TheCall->getArg(ArgNum); 7631 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7632 return false; 7633 7634 // Check if the argument is a string literal. 7635 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 7636 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 7637 << Arg->getSourceRange(); 7638 7639 // Check the type of special register given. 7640 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 7641 SmallVector<StringRef, 6> Fields; 7642 Reg.split(Fields, ":"); 7643 7644 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 7645 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7646 << Arg->getSourceRange(); 7647 7648 // If the string is the name of a register then we cannot check that it is 7649 // valid here but if the string is of one the forms described in ACLE then we 7650 // can check that the supplied fields are integers and within the valid 7651 // ranges. 7652 if (Fields.size() > 1) { 7653 bool FiveFields = Fields.size() == 5; 7654 7655 bool ValidString = true; 7656 if (IsARMBuiltin) { 7657 ValidString &= Fields[0].startswith_insensitive("cp") || 7658 Fields[0].startswith_insensitive("p"); 7659 if (ValidString) 7660 Fields[0] = Fields[0].drop_front( 7661 Fields[0].startswith_insensitive("cp") ? 2 : 1); 7662 7663 ValidString &= Fields[2].startswith_insensitive("c"); 7664 if (ValidString) 7665 Fields[2] = Fields[2].drop_front(1); 7666 7667 if (FiveFields) { 7668 ValidString &= Fields[3].startswith_insensitive("c"); 7669 if (ValidString) 7670 Fields[3] = Fields[3].drop_front(1); 7671 } 7672 } 7673 7674 SmallVector<int, 5> Ranges; 7675 if (FiveFields) 7676 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 7677 else 7678 Ranges.append({15, 7, 15}); 7679 7680 for (unsigned i=0; i<Fields.size(); ++i) { 7681 int IntField; 7682 ValidString &= !Fields[i].getAsInteger(10, IntField); 7683 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 7684 } 7685 7686 if (!ValidString) 7687 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7688 << Arg->getSourceRange(); 7689 } else if (IsAArch64Builtin && Fields.size() == 1) { 7690 // If the register name is one of those that appear in the condition below 7691 // and the special register builtin being used is one of the write builtins, 7692 // then we require that the argument provided for writing to the register 7693 // is an integer constant expression. This is because it will be lowered to 7694 // an MSR (immediate) instruction, so we need to know the immediate at 7695 // compile time. 7696 if (TheCall->getNumArgs() != 2) 7697 return false; 7698 7699 std::string RegLower = Reg.lower(); 7700 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7701 RegLower != "pan" && RegLower != "uao") 7702 return false; 7703 7704 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7705 } 7706 7707 return false; 7708 } 7709 7710 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7711 /// Emit an error and return true on failure; return false on success. 7712 /// TypeStr is a string containing the type descriptor of the value returned by 7713 /// the builtin and the descriptors of the expected type of the arguments. 7714 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 7715 const char *TypeStr) { 7716 7717 assert((TypeStr[0] != '\0') && 7718 "Invalid types in PPC MMA builtin declaration"); 7719 7720 switch (BuiltinID) { 7721 default: 7722 // This function is called in CheckPPCBuiltinFunctionCall where the 7723 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 7724 // we are isolating the pair vector memop builtins that can be used with mma 7725 // off so the default case is every builtin that requires mma and paired 7726 // vector memops. 7727 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7728 diag::err_ppc_builtin_only_on_arch, "10") || 7729 SemaFeatureCheck(*this, TheCall, "mma", 7730 diag::err_ppc_builtin_only_on_arch, "10")) 7731 return true; 7732 break; 7733 case PPC::BI__builtin_vsx_lxvp: 7734 case PPC::BI__builtin_vsx_stxvp: 7735 case PPC::BI__builtin_vsx_assemble_pair: 7736 case PPC::BI__builtin_vsx_disassemble_pair: 7737 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7738 diag::err_ppc_builtin_only_on_arch, "10")) 7739 return true; 7740 break; 7741 } 7742 7743 unsigned Mask = 0; 7744 unsigned ArgNum = 0; 7745 7746 // The first type in TypeStr is the type of the value returned by the 7747 // builtin. So we first read that type and change the type of TheCall. 7748 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7749 TheCall->setType(type); 7750 7751 while (*TypeStr != '\0') { 7752 Mask = 0; 7753 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7754 if (ArgNum >= TheCall->getNumArgs()) { 7755 ArgNum++; 7756 break; 7757 } 7758 7759 Expr *Arg = TheCall->getArg(ArgNum); 7760 QualType PassedType = Arg->getType(); 7761 QualType StrippedRVType = PassedType.getCanonicalType(); 7762 7763 // Strip Restrict/Volatile qualifiers. 
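// Note that getUnqualifiedType() below drops every qualifier (const included)
// whenever restrict or volatile is present on the canonical type.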
7764 if (StrippedRVType.isRestrictQualified() || 7765 StrippedRVType.isVolatileQualified()) 7766 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 7767 7768 // The only case where the argument type and expected type are allowed to 7769 // mismatch is if the argument type is a non-void pointer (or array) and 7770 // expected type is a void pointer. 7771 if (StrippedRVType != ExpectedType) 7772 if (!(ExpectedType->isVoidPointerType() && 7773 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 7774 return Diag(Arg->getBeginLoc(), 7775 diag::err_typecheck_convert_incompatible) 7776 << PassedType << ExpectedType << 1 << 0 << 0; 7777 7778 // If the value of the Mask is not 0, we have a constraint in the size of 7779 // the integer argument so here we ensure the argument is a constant that 7780 // is in the valid range. 7781 if (Mask != 0 && 7782 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7783 return true; 7784 7785 ArgNum++; 7786 } 7787 7788 // In case we exited early from the previous loop, there are other types to 7789 // read from TypeStr. So we need to read them all to ensure we have the right 7790 // number of arguments in TheCall and if it is not the case, to display a 7791 // better error message. 7792 while (*TypeStr != '\0') { 7793 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7794 ArgNum++; 7795 } 7796 if (checkArgCount(*this, TheCall, ArgNum)) 7797 return true; 7798 7799 return false; 7800 } 7801 7802 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 7803 /// This checks that the target supports __builtin_longjmp and 7804 /// that val is a constant 1. 7805 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7806 if (!Context.getTargetInfo().hasSjLjLowering()) 7807 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7808 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7809 7810 Expr *Arg = TheCall->getArg(1); 7811 llvm::APSInt Result; 7812 7813 // TODO: This is less than ideal. Overload this to take a value. 7814 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7815 return true; 7816 7817 if (Result != 1) 7818 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7819 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7820 7821 return false; 7822 } 7823 7824 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 7825 /// This checks that the target supports __builtin_setjmp. 7826 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7827 if (!Context.getTargetInfo().hasSjLjLowering()) 7828 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7829 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7830 return false; 7831 } 7832 7833 namespace { 7834 7835 class UncoveredArgHandler { 7836 enum { Unknown = -1, AllCovered = -2 }; 7837 7838 signed FirstUncoveredArg = Unknown; 7839 SmallVector<const Expr *, 4> DiagnosticExprs; 7840 7841 public: 7842 UncoveredArgHandler() = default; 7843 7844 bool hasUncoveredArg() const { 7845 return (FirstUncoveredArg >= 0); 7846 } 7847 7848 unsigned getUncoveredArg() const { 7849 assert(hasUncoveredArg() && "no uncovered argument"); 7850 return FirstUncoveredArg; 7851 } 7852 7853 void setAllCovered() { 7854 // A string has been found with all arguments covered, so clear out 7855 // the diagnostics. 
7856 DiagnosticExprs.clear(); 7857 FirstUncoveredArg = AllCovered; 7858 } 7859 7860 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7861 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7862 7863 // Don't update if a previous string covers all arguments. 7864 if (FirstUncoveredArg == AllCovered) 7865 return; 7866 7867 // UncoveredArgHandler tracks the highest uncovered argument index 7868 // and with it all the strings that match this index. 7869 if (NewFirstUncoveredArg == FirstUncoveredArg) 7870 DiagnosticExprs.push_back(StrExpr); 7871 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7872 DiagnosticExprs.clear(); 7873 DiagnosticExprs.push_back(StrExpr); 7874 FirstUncoveredArg = NewFirstUncoveredArg; 7875 } 7876 } 7877 7878 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7879 }; 7880 7881 enum StringLiteralCheckType { 7882 SLCT_NotALiteral, 7883 SLCT_UncheckedLiteral, 7884 SLCT_CheckedLiteral 7885 }; 7886 7887 } // namespace 7888 7889 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7890 BinaryOperatorKind BinOpKind, 7891 bool AddendIsRight) { 7892 unsigned BitWidth = Offset.getBitWidth(); 7893 unsigned AddendBitWidth = Addend.getBitWidth(); 7894 // There might be negative interim results. 7895 if (Addend.isUnsigned()) { 7896 Addend = Addend.zext(++AddendBitWidth); 7897 Addend.setIsSigned(true); 7898 } 7899 // Adjust the bit width of the APSInts. 7900 if (AddendBitWidth > BitWidth) { 7901 Offset = Offset.sext(AddendBitWidth); 7902 BitWidth = AddendBitWidth; 7903 } else if (BitWidth > AddendBitWidth) { 7904 Addend = Addend.sext(BitWidth); 7905 } 7906 7907 bool Ov = false; 7908 llvm::APSInt ResOffset = Offset; 7909 if (BinOpKind == BO_Add) 7910 ResOffset = Offset.sadd_ov(Addend, Ov); 7911 else { 7912 assert(AddendIsRight && BinOpKind == BO_Sub && 7913 "operator must be add or sub with addend on the right"); 7914 ResOffset = Offset.ssub_ov(Addend, Ov); 7915 } 7916 7917 // We add an offset to a pointer here so we should support an offset as big as 7918 // possible. 7919 if (Ov) { 7920 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7921 "index (intermediate) result too big"); 7922 Offset = Offset.sext(2 * BitWidth); 7923 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7924 return; 7925 } 7926 7927 Offset = ResOffset; 7928 } 7929 7930 namespace { 7931 7932 // This is a wrapper class around StringLiteral to support offsetted string 7933 // literals as format strings. It takes the offset into account when returning 7934 // the string and its length or the source locations to display notes correctly. 
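// For example, given printf("xyz%d" + 3, i), the literal is checked starting at
// offset 3, i.e. as if the format string were just "%d".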
7935 class FormatStringLiteral { 7936 const StringLiteral *FExpr; 7937 int64_t Offset; 7938 7939 public: 7940 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7941 : FExpr(fexpr), Offset(Offset) {} 7942 7943 StringRef getString() const { 7944 return FExpr->getString().drop_front(Offset); 7945 } 7946 7947 unsigned getByteLength() const { 7948 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7949 } 7950 7951 unsigned getLength() const { return FExpr->getLength() - Offset; } 7952 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7953 7954 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7955 7956 QualType getType() const { return FExpr->getType(); } 7957 7958 bool isAscii() const { return FExpr->isAscii(); } 7959 bool isWide() const { return FExpr->isWide(); } 7960 bool isUTF8() const { return FExpr->isUTF8(); } 7961 bool isUTF16() const { return FExpr->isUTF16(); } 7962 bool isUTF32() const { return FExpr->isUTF32(); } 7963 bool isPascal() const { return FExpr->isPascal(); } 7964 7965 SourceLocation getLocationOfByte( 7966 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7967 const TargetInfo &Target, unsigned *StartToken = nullptr, 7968 unsigned *StartTokenByteOffset = nullptr) const { 7969 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7970 StartToken, StartTokenByteOffset); 7971 } 7972 7973 SourceLocation getBeginLoc() const LLVM_READONLY { 7974 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7975 } 7976 7977 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7978 }; 7979 7980 } // namespace 7981 7982 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 7983 const Expr *OrigFormatExpr, 7984 ArrayRef<const Expr *> Args, 7985 bool HasVAListArg, unsigned format_idx, 7986 unsigned firstDataArg, 7987 Sema::FormatStringType Type, 7988 bool inFunctionCall, 7989 Sema::VariadicCallType CallType, 7990 llvm::SmallBitVector &CheckedVarArgs, 7991 UncoveredArgHandler &UncoveredArg, 7992 bool IgnoreStringsWithoutSpecifiers); 7993 7994 // Determine if an expression is a string literal or constant string. 7995 // If this function returns false on the arguments to a function expecting a 7996 // format string, we will usually need to emit a warning. 7997 // True string literals are then checked by CheckFormatString. 7998 static StringLiteralCheckType 7999 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8000 bool HasVAListArg, unsigned format_idx, 8001 unsigned firstDataArg, Sema::FormatStringType Type, 8002 Sema::VariadicCallType CallType, bool InFunctionCall, 8003 llvm::SmallBitVector &CheckedVarArgs, 8004 UncoveredArgHandler &UncoveredArg, 8005 llvm::APSInt Offset, 8006 bool IgnoreStringsWithoutSpecifiers = false) { 8007 if (S.isConstantEvaluated()) 8008 return SLCT_NotALiteral; 8009 tryAgain: 8010 assert(Offset.isSigned() && "invalid offset"); 8011 8012 if (E->isTypeDependent() || E->isValueDependent()) 8013 return SLCT_NotALiteral; 8014 8015 E = E->IgnoreParenCasts(); 8016 8017 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8018 // Technically -Wformat-nonliteral does not warn about this case. 8019 // The behavior of printf and friends in this case is implementation 8020 // dependent. Ideally if the format string cannot be null then 8021 // it should have a 'nonnull' attribute in the function prototype. 
8022 return SLCT_UncheckedLiteral; 8023 8024 switch (E->getStmtClass()) { 8025 case Stmt::BinaryConditionalOperatorClass: 8026 case Stmt::ConditionalOperatorClass: { 8027 // The expression is a literal if both sub-expressions were, and it was 8028 // completely checked only if both sub-expressions were checked. 8029 const AbstractConditionalOperator *C = 8030 cast<AbstractConditionalOperator>(E); 8031 8032 // Determine whether it is necessary to check both sub-expressions, for 8033 // example, because the condition expression is a constant that can be 8034 // evaluated at compile time. 8035 bool CheckLeft = true, CheckRight = true; 8036 8037 bool Cond; 8038 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8039 S.isConstantEvaluated())) { 8040 if (Cond) 8041 CheckRight = false; 8042 else 8043 CheckLeft = false; 8044 } 8045 8046 // We need to maintain the offsets for the right and the left hand side 8047 // separately to check if every possible indexed expression is a valid 8048 // string literal. They might have different offsets for different string 8049 // literals in the end. 8050 StringLiteralCheckType Left; 8051 if (!CheckLeft) 8052 Left = SLCT_UncheckedLiteral; 8053 else { 8054 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8055 HasVAListArg, format_idx, firstDataArg, 8056 Type, CallType, InFunctionCall, 8057 CheckedVarArgs, UncoveredArg, Offset, 8058 IgnoreStringsWithoutSpecifiers); 8059 if (Left == SLCT_NotALiteral || !CheckRight) { 8060 return Left; 8061 } 8062 } 8063 8064 StringLiteralCheckType Right = checkFormatStringExpr( 8065 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8066 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8067 IgnoreStringsWithoutSpecifiers); 8068 8069 return (CheckLeft && Left < Right) ? Left : Right; 8070 } 8071 8072 case Stmt::ImplicitCastExprClass: 8073 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8074 goto tryAgain; 8075 8076 case Stmt::OpaqueValueExprClass: 8077 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8078 E = src; 8079 goto tryAgain; 8080 } 8081 return SLCT_NotALiteral; 8082 8083 case Stmt::PredefinedExprClass: 8084 // While __func__, etc., are technically not string literals, they 8085 // cannot contain format specifiers and thus are not a security 8086 // liability. 8087 return SLCT_UncheckedLiteral; 8088 8089 case Stmt::DeclRefExprClass: { 8090 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8091 8092 // As an exception, do not flag errors for variables binding to 8093 // const string literals. 8094 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8095 bool isConstant = false; 8096 QualType T = DR->getType(); 8097 8098 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8099 isConstant = AT->getElementType().isConstant(S.Context); 8100 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8101 isConstant = T.isConstant(S.Context) && 8102 PT->getPointeeType().isConstant(S.Context); 8103 } else if (T->isObjCObjectPointerType()) { 8104 // In ObjC, there is usually no "const ObjectPointer" type, 8105 // so don't check if the pointee type is constant. 
8106 isConstant = T.isConstant(S.Context); 8107 } 8108 8109 if (isConstant) { 8110 if (const Expr *Init = VD->getAnyInitializer()) { 8111 // Look through initializers like const char c[] = { "foo" } 8112 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8113 if (InitList->isStringLiteralInit()) 8114 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8115 } 8116 return checkFormatStringExpr(S, Init, Args, 8117 HasVAListArg, format_idx, 8118 firstDataArg, Type, CallType, 8119 /*InFunctionCall*/ false, CheckedVarArgs, 8120 UncoveredArg, Offset); 8121 } 8122 } 8123 8124 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8125 // special check to see if the format string is a function parameter 8126 // of the function calling the printf function. If the function 8127 // has an attribute indicating it is a printf-like function, then we 8128 // should suppress warnings concerning non-literals being used in a call 8129 // to a vprintf function. For example: 8130 // 8131 // void 8132 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8133 // va_list ap; 8134 // va_start(ap, fmt); 8135 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8136 // ... 8137 // } 8138 if (HasVAListArg) { 8139 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8140 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8141 int PVIndex = PV->getFunctionScopeIndex() + 1; 8142 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8143 // adjust for implicit parameter 8144 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8145 if (MD->isInstance()) 8146 ++PVIndex; 8147 // We also check if the formats are compatible. 8148 // We can't pass a 'scanf' string to a 'printf' function. 8149 if (PVIndex == PVFormat->getFormatIdx() && 8150 Type == S.GetFormatStringType(PVFormat)) 8151 return SLCT_UncheckedLiteral; 8152 } 8153 } 8154 } 8155 } 8156 } 8157 8158 return SLCT_NotALiteral; 8159 } 8160 8161 case Stmt::CallExprClass: 8162 case Stmt::CXXMemberCallExprClass: { 8163 const CallExpr *CE = cast<CallExpr>(E); 8164 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8165 bool IsFirst = true; 8166 StringLiteralCheckType CommonResult; 8167 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8168 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8169 StringLiteralCheckType Result = checkFormatStringExpr( 8170 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8171 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8172 IgnoreStringsWithoutSpecifiers); 8173 if (IsFirst) { 8174 CommonResult = Result; 8175 IsFirst = false; 8176 } 8177 } 8178 if (!IsFirst) 8179 return CommonResult; 8180 8181 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8182 unsigned BuiltinID = FD->getBuiltinID(); 8183 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8184 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8185 const Expr *Arg = CE->getArg(0); 8186 return checkFormatStringExpr(S, Arg, Args, 8187 HasVAListArg, format_idx, 8188 firstDataArg, Type, CallType, 8189 InFunctionCall, CheckedVarArgs, 8190 UncoveredArg, Offset, 8191 IgnoreStringsWithoutSpecifiers); 8192 } 8193 } 8194 } 8195 8196 return SLCT_NotALiteral; 8197 } 8198 case Stmt::ObjCMessageExprClass: { 8199 const auto *ME = cast<ObjCMessageExpr>(E); 8200 if (const auto *MD = ME->getMethodDecl()) { 8201 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8202 // As a special case heuristic, if we're using 
the method -[NSBundle
8203 // localizedStringForKey:value:table:], ignore any key strings that lack
8204 // format specifiers. The idea is that if the key doesn't have any
8205 // format specifiers then it's probably just a key to map to the
8206 // localized strings. If it does have format specifiers though, then it's
8207 // likely that the text of the key is the format string in the
8208 // programmer's language, and should be checked.
8209 const ObjCInterfaceDecl *IFace;
8210 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
8211 IFace->getIdentifier()->isStr("NSBundle") &&
8212 MD->getSelector().isKeywordSelector(
8213 {"localizedStringForKey", "value", "table"})) {
8214 IgnoreStringsWithoutSpecifiers = true;
8215 }
8216
8217 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
8218 return checkFormatStringExpr(
8219 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
8220 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
8221 IgnoreStringsWithoutSpecifiers);
8222 }
8223 }
8224
8225 return SLCT_NotALiteral;
8226 }
8227 case Stmt::ObjCStringLiteralClass:
8228 case Stmt::StringLiteralClass: {
8229 const StringLiteral *StrE = nullptr;
8230
8231 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
8232 StrE = ObjCFExpr->getString();
8233 else
8234 StrE = cast<StringLiteral>(E);
8235
8236 if (StrE) {
8237 if (Offset.isNegative() || Offset > StrE->getLength()) {
8238 // TODO: It would be better to have an explicit warning for out of
8239 // bounds literals.
8240 return SLCT_NotALiteral;
8241 }
8242 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
8243 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
8244 firstDataArg, Type, InFunctionCall, CallType,
8245 CheckedVarArgs, UncoveredArg,
8246 IgnoreStringsWithoutSpecifiers);
8247 return SLCT_CheckedLiteral;
8248 }
8249
8250 return SLCT_NotALiteral;
8251 }
8252 case Stmt::BinaryOperatorClass: {
8253 const BinaryOperator *BinOp = cast<BinaryOperator>(E);
8254
8255 // A string literal + an int offset is still a string literal.
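// Both "literal" + N and N + "literal" are handled, as is "literal" - N, where
// the subtracted amount is folded into the running offset.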
8256 if (BinOp->isAdditiveOp()) { 8257 Expr::EvalResult LResult, RResult; 8258 8259 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8260 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8261 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8262 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8263 8264 if (LIsInt != RIsInt) { 8265 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8266 8267 if (LIsInt) { 8268 if (BinOpKind == BO_Add) { 8269 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8270 E = BinOp->getRHS(); 8271 goto tryAgain; 8272 } 8273 } else { 8274 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8275 E = BinOp->getLHS(); 8276 goto tryAgain; 8277 } 8278 } 8279 } 8280 8281 return SLCT_NotALiteral; 8282 } 8283 case Stmt::UnaryOperatorClass: { 8284 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8285 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8286 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8287 Expr::EvalResult IndexResult; 8288 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8289 Expr::SE_NoSideEffects, 8290 S.isConstantEvaluated())) { 8291 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8292 /*RHS is int*/ true); 8293 E = ASE->getBase(); 8294 goto tryAgain; 8295 } 8296 } 8297 8298 return SLCT_NotALiteral; 8299 } 8300 8301 default: 8302 return SLCT_NotALiteral; 8303 } 8304 } 8305 8306 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8307 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8308 .Case("scanf", FST_Scanf) 8309 .Cases("printf", "printf0", FST_Printf) 8310 .Cases("NSString", "CFString", FST_NSString) 8311 .Case("strftime", FST_Strftime) 8312 .Case("strfmon", FST_Strfmon) 8313 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8314 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8315 .Case("os_trace", FST_OSLog) 8316 .Case("os_log", FST_OSLog) 8317 .Default(FST_Unknown); 8318 } 8319 8320 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8321 /// functions) for correct use of format strings. 8322 /// Returns true if a format string has been fully checked. 8323 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8324 ArrayRef<const Expr *> Args, 8325 bool IsCXXMember, 8326 VariadicCallType CallType, 8327 SourceLocation Loc, SourceRange Range, 8328 llvm::SmallBitVector &CheckedVarArgs) { 8329 FormatStringInfo FSI; 8330 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8331 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8332 FSI.FirstDataArg, GetFormatStringType(Format), 8333 CallType, Loc, Range, CheckedVarArgs); 8334 return false; 8335 } 8336 8337 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8338 bool HasVAListArg, unsigned format_idx, 8339 unsigned firstDataArg, FormatStringType Type, 8340 VariadicCallType CallType, 8341 SourceLocation Loc, SourceRange Range, 8342 llvm::SmallBitVector &CheckedVarArgs) { 8343 // CHECK: printf/scanf-like function is called with no format string. 8344 if (format_idx >= Args.size()) { 8345 Diag(Loc, diag::warn_missing_format_string) << Range; 8346 return false; 8347 } 8348 8349 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8350 8351 // CHECK: format string is not a string literal. 8352 // 8353 // Dynamically generated format strings are difficult to 8354 // automatically vet at compile time. 
Requiring that format strings 8355 // are string literals: (1) permits the checking of format strings by 8356 // the compiler and thereby (2) can practically remove the source of 8357 // many format string exploits. 8358 8359 // Format string can be either ObjC string (e.g. @"%d") or 8360 // C string (e.g. "%d") 8361 // ObjC string uses the same format specifiers as C string, so we can use 8362 // the same format string checking logic for both ObjC and C strings. 8363 UncoveredArgHandler UncoveredArg; 8364 StringLiteralCheckType CT = 8365 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8366 format_idx, firstDataArg, Type, CallType, 8367 /*IsFunctionCall*/ true, CheckedVarArgs, 8368 UncoveredArg, 8369 /*no string offset*/ llvm::APSInt(64, false) = 0); 8370 8371 // Generate a diagnostic where an uncovered argument is detected. 8372 if (UncoveredArg.hasUncoveredArg()) { 8373 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8374 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8375 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8376 } 8377 8378 if (CT != SLCT_NotALiteral) 8379 // Literal format string found, check done! 8380 return CT == SLCT_CheckedLiteral; 8381 8382 // Strftime is particular as it always uses a single 'time' argument, 8383 // so it is safe to pass a non-literal string. 8384 if (Type == FST_Strftime) 8385 return false; 8386 8387 // Do not emit diag when the string param is a macro expansion and the 8388 // format is either NSString or CFString. This is a hack to prevent 8389 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8390 // which are usually used in place of NS and CF string literals. 8391 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8392 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8393 return false; 8394 8395 // If there are no arguments specified, warn with -Wformat-security, otherwise 8396 // warn only with -Wformat-nonliteral. 8397 if (Args.size() == firstDataArg) { 8398 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8399 << OrigFormatExpr->getSourceRange(); 8400 switch (Type) { 8401 default: 8402 break; 8403 case FST_Kprintf: 8404 case FST_FreeBSDKPrintf: 8405 case FST_Printf: 8406 Diag(FormatLoc, diag::note_format_security_fixit) 8407 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8408 break; 8409 case FST_NSString: 8410 Diag(FormatLoc, diag::note_format_security_fixit) 8411 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8412 break; 8413 } 8414 } else { 8415 Diag(FormatLoc, diag::warn_format_nonliteral) 8416 << OrigFormatExpr->getSourceRange(); 8417 } 8418 return false; 8419 } 8420 8421 namespace { 8422 8423 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8424 protected: 8425 Sema &S; 8426 const FormatStringLiteral *FExpr; 8427 const Expr *OrigFormatExpr; 8428 const Sema::FormatStringType FSType; 8429 const unsigned FirstDataArg; 8430 const unsigned NumDataArgs; 8431 const char *Beg; // Start of format string. 
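// True for v*printf-style callees, where the data arguments arrive in a va_list
// and cannot be type-checked individually.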
8432 const bool HasVAListArg; 8433 ArrayRef<const Expr *> Args; 8434 unsigned FormatIdx; 8435 llvm::SmallBitVector CoveredArgs; 8436 bool usesPositionalArgs = false; 8437 bool atFirstArg = true; 8438 bool inFunctionCall; 8439 Sema::VariadicCallType CallType; 8440 llvm::SmallBitVector &CheckedVarArgs; 8441 UncoveredArgHandler &UncoveredArg; 8442 8443 public: 8444 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8445 const Expr *origFormatExpr, 8446 const Sema::FormatStringType type, unsigned firstDataArg, 8447 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8448 ArrayRef<const Expr *> Args, unsigned formatIdx, 8449 bool inFunctionCall, Sema::VariadicCallType callType, 8450 llvm::SmallBitVector &CheckedVarArgs, 8451 UncoveredArgHandler &UncoveredArg) 8452 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8453 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8454 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8455 inFunctionCall(inFunctionCall), CallType(callType), 8456 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8457 CoveredArgs.resize(numDataArgs); 8458 CoveredArgs.reset(); 8459 } 8460 8461 void DoneProcessing(); 8462 8463 void HandleIncompleteSpecifier(const char *startSpecifier, 8464 unsigned specifierLen) override; 8465 8466 void HandleInvalidLengthModifier( 8467 const analyze_format_string::FormatSpecifier &FS, 8468 const analyze_format_string::ConversionSpecifier &CS, 8469 const char *startSpecifier, unsigned specifierLen, 8470 unsigned DiagID); 8471 8472 void HandleNonStandardLengthModifier( 8473 const analyze_format_string::FormatSpecifier &FS, 8474 const char *startSpecifier, unsigned specifierLen); 8475 8476 void HandleNonStandardConversionSpecifier( 8477 const analyze_format_string::ConversionSpecifier &CS, 8478 const char *startSpecifier, unsigned specifierLen); 8479 8480 void HandlePosition(const char *startPos, unsigned posLen) override; 8481 8482 void HandleInvalidPosition(const char *startSpecifier, 8483 unsigned specifierLen, 8484 analyze_format_string::PositionContext p) override; 8485 8486 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8487 8488 void HandleNullChar(const char *nullCharacter) override; 8489 8490 template <typename Range> 8491 static void 8492 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8493 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8494 bool IsStringLocation, Range StringRange, 8495 ArrayRef<FixItHint> Fixit = None); 8496 8497 protected: 8498 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8499 const char *startSpec, 8500 unsigned specifierLen, 8501 const char *csStart, unsigned csLen); 8502 8503 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8504 const char *startSpec, 8505 unsigned specifierLen); 8506 8507 SourceRange getFormatStringRange(); 8508 CharSourceRange getSpecifierRange(const char *startSpecifier, 8509 unsigned specifierLen); 8510 SourceLocation getLocationOfByte(const char *x); 8511 8512 const Expr *getDataArg(unsigned i) const; 8513 8514 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8515 const analyze_format_string::ConversionSpecifier &CS, 8516 const char *startSpecifier, unsigned specifierLen, 8517 unsigned argIndex); 8518 8519 template <typename Range> 8520 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8521 bool IsStringLocation, Range StringRange, 8522 ArrayRef<FixItHint> Fixit = None); 
8523 }; 8524 8525 } // namespace 8526 8527 SourceRange CheckFormatHandler::getFormatStringRange() { 8528 return OrigFormatExpr->getSourceRange(); 8529 } 8530 8531 CharSourceRange CheckFormatHandler:: 8532 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8533 SourceLocation Start = getLocationOfByte(startSpecifier); 8534 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8535 8536 // Advance the end SourceLocation by one due to half-open ranges. 8537 End = End.getLocWithOffset(1); 8538 8539 return CharSourceRange::getCharRange(Start, End); 8540 } 8541 8542 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8543 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8544 S.getLangOpts(), S.Context.getTargetInfo()); 8545 } 8546 8547 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8548 unsigned specifierLen){ 8549 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8550 getLocationOfByte(startSpecifier), 8551 /*IsStringLocation*/true, 8552 getSpecifierRange(startSpecifier, specifierLen)); 8553 } 8554 8555 void CheckFormatHandler::HandleInvalidLengthModifier( 8556 const analyze_format_string::FormatSpecifier &FS, 8557 const analyze_format_string::ConversionSpecifier &CS, 8558 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8559 using namespace analyze_format_string; 8560 8561 const LengthModifier &LM = FS.getLengthModifier(); 8562 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8563 8564 // See if we know how to fix this length modifier. 8565 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8566 if (FixedLM) { 8567 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8568 getLocationOfByte(LM.getStart()), 8569 /*IsStringLocation*/true, 8570 getSpecifierRange(startSpecifier, specifierLen)); 8571 8572 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8573 << FixedLM->toString() 8574 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8575 8576 } else { 8577 FixItHint Hint; 8578 if (DiagID == diag::warn_format_nonsensical_length) 8579 Hint = FixItHint::CreateRemoval(LMRange); 8580 8581 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8582 getLocationOfByte(LM.getStart()), 8583 /*IsStringLocation*/true, 8584 getSpecifierRange(startSpecifier, specifierLen), 8585 Hint); 8586 } 8587 } 8588 8589 void CheckFormatHandler::HandleNonStandardLengthModifier( 8590 const analyze_format_string::FormatSpecifier &FS, 8591 const char *startSpecifier, unsigned specifierLen) { 8592 using namespace analyze_format_string; 8593 8594 const LengthModifier &LM = FS.getLengthModifier(); 8595 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8596 8597 // See if we know how to fix this length modifier. 
8598 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8599 if (FixedLM) { 8600 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8601 << LM.toString() << 0, 8602 getLocationOfByte(LM.getStart()), 8603 /*IsStringLocation*/true, 8604 getSpecifierRange(startSpecifier, specifierLen)); 8605 8606 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8607 << FixedLM->toString() 8608 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8609 8610 } else { 8611 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8612 << LM.toString() << 0, 8613 getLocationOfByte(LM.getStart()), 8614 /*IsStringLocation*/true, 8615 getSpecifierRange(startSpecifier, specifierLen)); 8616 } 8617 } 8618 8619 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 8620 const analyze_format_string::ConversionSpecifier &CS, 8621 const char *startSpecifier, unsigned specifierLen) { 8622 using namespace analyze_format_string; 8623 8624 // See if we know how to fix this conversion specifier. 8625 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 8626 if (FixedCS) { 8627 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8628 << CS.toString() << /*conversion specifier*/1, 8629 getLocationOfByte(CS.getStart()), 8630 /*IsStringLocation*/true, 8631 getSpecifierRange(startSpecifier, specifierLen)); 8632 8633 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 8634 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 8635 << FixedCS->toString() 8636 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 8637 } else { 8638 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8639 << CS.toString() << /*conversion specifier*/1, 8640 getLocationOfByte(CS.getStart()), 8641 /*IsStringLocation*/true, 8642 getSpecifierRange(startSpecifier, specifierLen)); 8643 } 8644 } 8645 8646 void CheckFormatHandler::HandlePosition(const char *startPos, 8647 unsigned posLen) { 8648 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 8649 getLocationOfByte(startPos), 8650 /*IsStringLocation*/true, 8651 getSpecifierRange(startPos, posLen)); 8652 } 8653 8654 void 8655 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 8656 analyze_format_string::PositionContext p) { 8657 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 8658 << (unsigned) p, 8659 getLocationOfByte(startPos), /*IsStringLocation*/true, 8660 getSpecifierRange(startPos, posLen)); 8661 } 8662 8663 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 8664 unsigned posLen) { 8665 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 8666 getLocationOfByte(startPos), 8667 /*IsStringLocation*/true, 8668 getSpecifierRange(startPos, posLen)); 8669 } 8670 8671 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 8672 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 8673 // The presence of a null character is likely an error. 8674 EmitFormatDiagnostic( 8675 S.PDiag(diag::warn_printf_format_string_contains_null_char), 8676 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 8677 getFormatStringRange()); 8678 } 8679 } 8680 8681 // Note that this may return NULL if there was an error parsing or building 8682 // one of the argument expressions. 
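// The index is relative to FirstDataArg, so getDataArg(0) names the first data
// argument after the format string rather than the call's first argument.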
8683 const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
8684 return Args[FirstDataArg + i];
8685 }
8686
8687 void CheckFormatHandler::DoneProcessing() {
8688 // Does the number of data arguments exceed the number of
8689 // format conversions in the format string?
8690 if (!HasVAListArg) {
8691 // Find any arguments that weren't covered.
8692 CoveredArgs.flip();
8693 signed notCoveredArg = CoveredArgs.find_first();
8694 if (notCoveredArg >= 0) {
8695 assert((unsigned)notCoveredArg < NumDataArgs);
8696 UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
8697 } else {
8698 UncoveredArg.setAllCovered();
8699 }
8700 }
8701 }
8702
8703 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
8704 const Expr *ArgExpr) {
8705 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
8706 "Invalid state");
8707
8708 if (!ArgExpr)
8709 return;
8710
8711 SourceLocation Loc = ArgExpr->getBeginLoc();
8712
8713 if (S.getSourceManager().isInSystemMacro(Loc))
8714 return;
8715
8716 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
8717 for (auto E : DiagnosticExprs)
8718 PDiag << E->getSourceRange();
8719
8720 CheckFormatHandler::EmitFormatDiagnostic(
8721 S, IsFunctionCall, DiagnosticExprs[0],
8722 PDiag, Loc, /*IsStringLocation*/false,
8723 DiagnosticExprs[0]->getSourceRange());
8724 }
8725
8726 bool
8727 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
8728 SourceLocation Loc,
8729 const char *startSpec,
8730 unsigned specifierLen,
8731 const char *csStart,
8732 unsigned csLen) {
8733 bool keepGoing = true;
8734 if (argIndex < NumDataArgs) {
8735 // Consider the argument covered, even though the specifier doesn't
8736 // make sense.
8737 CoveredArgs.set(argIndex);
8738 }
8739 else {
8740 // If argIndex exceeds the number of data arguments we
8741 // don't issue a warning because that is just a cascade of warnings (and
8742 // they may have intended '%%' anyway). We don't want to continue processing
8743 // the format string after this point, however, as we will likely just get
8744 // gibberish when trying to match arguments.
8745 keepGoing = false;
8746 }
8747
8748 StringRef Specifier(csStart, csLen);
8749
8750 // If the specifier is non-printable, it could be the first byte of a UTF-8
8751 // sequence. In that case, print the UTF-8 code point. If not, print the byte
8752 // hex value.
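// (Code points below 0x100 print as \xNN, those up to U+FFFF as \uNNNN, and
// larger ones as \UNNNNNNNN.)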
8753 std::string CodePointStr; 8754 if (!llvm::sys::locale::isPrint(*csStart)) { 8755 llvm::UTF32 CodePoint; 8756 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8757 const llvm::UTF8 *E = 8758 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8759 llvm::ConversionResult Result = 8760 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8761 8762 if (Result != llvm::conversionOK) { 8763 unsigned char FirstChar = *csStart; 8764 CodePoint = (llvm::UTF32)FirstChar; 8765 } 8766 8767 llvm::raw_string_ostream OS(CodePointStr); 8768 if (CodePoint < 256) 8769 OS << "\\x" << llvm::format("%02x", CodePoint); 8770 else if (CodePoint <= 0xFFFF) 8771 OS << "\\u" << llvm::format("%04x", CodePoint); 8772 else 8773 OS << "\\U" << llvm::format("%08x", CodePoint); 8774 OS.flush(); 8775 Specifier = CodePointStr; 8776 } 8777 8778 EmitFormatDiagnostic( 8779 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8780 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8781 8782 return keepGoing; 8783 } 8784 8785 void 8786 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8787 const char *startSpec, 8788 unsigned specifierLen) { 8789 EmitFormatDiagnostic( 8790 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8791 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8792 } 8793 8794 bool 8795 CheckFormatHandler::CheckNumArgs( 8796 const analyze_format_string::FormatSpecifier &FS, 8797 const analyze_format_string::ConversionSpecifier &CS, 8798 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8799 8800 if (argIndex >= NumDataArgs) { 8801 PartialDiagnostic PDiag = FS.usesPositionalArg() 8802 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8803 << (argIndex+1) << NumDataArgs) 8804 : S.PDiag(diag::warn_printf_insufficient_data_args); 8805 EmitFormatDiagnostic( 8806 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8807 getSpecifierRange(startSpecifier, specifierLen)); 8808 8809 // Since more arguments than conversion tokens are given, by extension 8810 // all arguments are covered, so mark this as so. 8811 UncoveredArg.setAllCovered(); 8812 return false; 8813 } 8814 return true; 8815 } 8816 8817 template<typename Range> 8818 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8819 SourceLocation Loc, 8820 bool IsStringLocation, 8821 Range StringRange, 8822 ArrayRef<FixItHint> FixIt) { 8823 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8824 Loc, IsStringLocation, StringRange, FixIt); 8825 } 8826 8827 /// If the format string is not within the function call, emit a note 8828 /// so that the function call and string are in diagnostic messages. 8829 /// 8830 /// \param InFunctionCall if true, the format string is within the function 8831 /// call and only one diagnostic message will be produced. Otherwise, an 8832 /// extra note will be emitted pointing to location of the format string. 8833 /// 8834 /// \param ArgumentExpr the expression that is passed as the format string 8835 /// argument in the function call. Used for getting locations when two 8836 /// diagnostics are emitted. 8837 /// 8838 /// \param PDiag the callee should already have provided any strings for the 8839 /// diagnostic message. This function only adds locations and fixits 8840 /// to diagnostics. 8841 /// 8842 /// \param Loc primary location for diagnostic. 
If two diagnostics are
8843 /// required, one will be at Loc and a new SourceLocation will be created for
8844 /// the other one.
8845 ///
8846 /// \param IsStringLocation if true, Loc points to the format string and should
8847 /// be used for the note. Otherwise, Loc points to the argument list and will
8848 /// be used with PDiag.
8849 ///
8850 /// \param StringRange some or all of the string to highlight. This is
8851 /// templated so it can accept either a CharSourceRange or a SourceRange.
8852 ///
8853 /// \param FixIt optional fix-it hint for the format string.
8854 template <typename Range>
8855 void CheckFormatHandler::EmitFormatDiagnostic(
8856 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
8857 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
8858 Range StringRange, ArrayRef<FixItHint> FixIt) {
8859 if (InFunctionCall) {
8860 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
8861 D << StringRange;
8862 D << FixIt;
8863 } else {
8864 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
8865 << ArgumentExpr->getSourceRange();
8866
8867 const Sema::SemaDiagnosticBuilder &Note =
8868 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
8869 diag::note_format_string_defined);
8870
8871 Note << StringRange;
8872 Note << FixIt;
8873 }
8874 }
8875
8876 //===--- CHECK: Printf format string checking ------------------------------===//
8877
8878 namespace {
8879
8880 class CheckPrintfHandler : public CheckFormatHandler {
8881 public:
8882 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
8883 const Expr *origFormatExpr,
8884 const Sema::FormatStringType type, unsigned firstDataArg,
8885 unsigned numDataArgs, bool isObjC, const char *beg,
8886 bool hasVAListArg, ArrayRef<const Expr *> Args,
8887 unsigned formatIdx, bool inFunctionCall,
8888 Sema::VariadicCallType CallType,
8889 llvm::SmallBitVector &CheckedVarArgs,
8890 UncoveredArgHandler &UncoveredArg)
8891 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
8892 numDataArgs, beg, hasVAListArg, Args, formatIdx,
8893 inFunctionCall, CallType, CheckedVarArgs,
8894 UncoveredArg) {}
8895
8896 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
8897
8898 /// Returns true if '%@' specifiers are allowed in the format string.
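/// NSString, os_log, and os_trace format strings may embed ObjC objects via %@.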
8899 bool allowsObjCArg() const { 8900 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8901 FSType == Sema::FST_OSTrace; 8902 } 8903 8904 bool HandleInvalidPrintfConversionSpecifier( 8905 const analyze_printf::PrintfSpecifier &FS, 8906 const char *startSpecifier, 8907 unsigned specifierLen) override; 8908 8909 void handleInvalidMaskType(StringRef MaskType) override; 8910 8911 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8912 const char *startSpecifier, 8913 unsigned specifierLen) override; 8914 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8915 const char *StartSpecifier, 8916 unsigned SpecifierLen, 8917 const Expr *E); 8918 8919 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8920 const char *startSpecifier, unsigned specifierLen); 8921 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8922 const analyze_printf::OptionalAmount &Amt, 8923 unsigned type, 8924 const char *startSpecifier, unsigned specifierLen); 8925 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8926 const analyze_printf::OptionalFlag &flag, 8927 const char *startSpecifier, unsigned specifierLen); 8928 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8929 const analyze_printf::OptionalFlag &ignoredFlag, 8930 const analyze_printf::OptionalFlag &flag, 8931 const char *startSpecifier, unsigned specifierLen); 8932 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8933 const Expr *E); 8934 8935 void HandleEmptyObjCModifierFlag(const char *startFlag, 8936 unsigned flagLen) override; 8937 8938 void HandleInvalidObjCModifierFlag(const char *startFlag, 8939 unsigned flagLen) override; 8940 8941 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8942 const char *flagsEnd, 8943 const char *conversionPosition) 8944 override; 8945 }; 8946 8947 } // namespace 8948 8949 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8950 const analyze_printf::PrintfSpecifier &FS, 8951 const char *startSpecifier, 8952 unsigned specifierLen) { 8953 const analyze_printf::PrintfConversionSpecifier &CS = 8954 FS.getConversionSpecifier(); 8955 8956 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8957 getLocationOfByte(CS.getStart()), 8958 startSpecifier, specifierLen, 8959 CS.getStart(), CS.getLength()); 8960 } 8961 8962 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8963 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8964 } 8965 8966 bool CheckPrintfHandler::HandleAmount( 8967 const analyze_format_string::OptionalAmount &Amt, 8968 unsigned k, const char *startSpecifier, 8969 unsigned specifierLen) { 8970 if (Amt.hasDataArgument()) { 8971 if (!HasVAListArg) { 8972 unsigned argIndex = Amt.getArgIndex(); 8973 if (argIndex >= NumDataArgs) { 8974 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8975 << k, 8976 getLocationOfByte(Amt.getStart()), 8977 /*IsStringLocation*/true, 8978 getSpecifierRange(startSpecifier, specifierLen)); 8979 // Don't do any more checking. We will just emit 8980 // spurious errors. 8981 return false; 8982 } 8983 8984 // Type check the data argument. It should be an 'int'. 8985 // Although not in conformance with C99, we also allow the argument to be 8986 // an 'unsigned int' as that is a reasonably safe case. GCC also 8987 // doesn't emit a warning for that case. 
8988 CoveredArgs.set(argIndex); 8989 const Expr *Arg = getDataArg(argIndex); 8990 if (!Arg) 8991 return false; 8992 8993 QualType T = Arg->getType(); 8994 8995 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 8996 assert(AT.isValid()); 8997 8998 if (!AT.matchesType(S.Context, T)) { 8999 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9000 << k << AT.getRepresentativeTypeName(S.Context) 9001 << T << Arg->getSourceRange(), 9002 getLocationOfByte(Amt.getStart()), 9003 /*IsStringLocation*/true, 9004 getSpecifierRange(startSpecifier, specifierLen)); 9005 // Don't do any more checking. We will just emit 9006 // spurious errors. 9007 return false; 9008 } 9009 } 9010 } 9011 return true; 9012 } 9013 9014 void CheckPrintfHandler::HandleInvalidAmount( 9015 const analyze_printf::PrintfSpecifier &FS, 9016 const analyze_printf::OptionalAmount &Amt, 9017 unsigned type, 9018 const char *startSpecifier, 9019 unsigned specifierLen) { 9020 const analyze_printf::PrintfConversionSpecifier &CS = 9021 FS.getConversionSpecifier(); 9022 9023 FixItHint fixit = 9024 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9025 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9026 Amt.getConstantLength())) 9027 : FixItHint(); 9028 9029 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9030 << type << CS.toString(), 9031 getLocationOfByte(Amt.getStart()), 9032 /*IsStringLocation*/true, 9033 getSpecifierRange(startSpecifier, specifierLen), 9034 fixit); 9035 } 9036 9037 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9038 const analyze_printf::OptionalFlag &flag, 9039 const char *startSpecifier, 9040 unsigned specifierLen) { 9041 // Warn about pointless flag with a fixit removal. 9042 const analyze_printf::PrintfConversionSpecifier &CS = 9043 FS.getConversionSpecifier(); 9044 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9045 << flag.toString() << CS.toString(), 9046 getLocationOfByte(flag.getPosition()), 9047 /*IsStringLocation*/true, 9048 getSpecifierRange(startSpecifier, specifierLen), 9049 FixItHint::CreateRemoval( 9050 getSpecifierRange(flag.getPosition(), 1))); 9051 } 9052 9053 void CheckPrintfHandler::HandleIgnoredFlag( 9054 const analyze_printf::PrintfSpecifier &FS, 9055 const analyze_printf::OptionalFlag &ignoredFlag, 9056 const analyze_printf::OptionalFlag &flag, 9057 const char *startSpecifier, 9058 unsigned specifierLen) { 9059 // Warn about ignored flag with a fixit removal. 9060 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9061 << ignoredFlag.toString() << flag.toString(), 9062 getLocationOfByte(ignoredFlag.getPosition()), 9063 /*IsStringLocation*/true, 9064 getSpecifierRange(startSpecifier, specifierLen), 9065 FixItHint::CreateRemoval( 9066 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9067 } 9068 9069 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9070 unsigned flagLen) { 9071 // Warn about an empty flag. 9072 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9073 getLocationOfByte(startFlag), 9074 /*IsStringLocation*/true, 9075 getSpecifierRange(startFlag, flagLen)); 9076 } 9077 9078 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9079 unsigned flagLen) { 9080 // Warn about an invalid flag. 
9081 auto Range = getSpecifierRange(startFlag, flagLen);
9082 StringRef flag(startFlag, flagLen);
9083 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
9084 getLocationOfByte(startFlag),
9085 /*IsStringLocation*/true,
9086 Range, FixItHint::CreateRemoval(Range));
9087 }
9088
9089 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
9090 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
9091 // Warn about using '[...]' without a '@' conversion.
9092 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
9093 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
9094 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
9095 getLocationOfByte(conversionPosition),
9096 /*IsStringLocation*/true,
9097 Range, FixItHint::CreateRemoval(Range));
9098 }
9099
9100 // Determines if the specified type is a C++ class or struct containing
9101 // a member with the specified name and kind (e.g. a CXXMethodDecl named
9102 // "c_str()").
9103 template<typename MemberKind>
9104 static llvm::SmallPtrSet<MemberKind*, 1>
9105 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
9106 const RecordType *RT = Ty->getAs<RecordType>();
9107 llvm::SmallPtrSet<MemberKind*, 1> Results;
9108
9109 if (!RT)
9110 return Results;
9111 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
9112 if (!RD || !RD->getDefinition())
9113 return Results;
9114
9115 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
9116 Sema::LookupMemberName);
9117 R.suppressDiagnostics();
9118
9119 // We just need to include all members of the right kind turned up by the
9120 // filter, at this point.
9121 if (S.LookupQualifiedName(R, RT->getDecl()))
9122 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
9123 NamedDecl *decl = (*I)->getUnderlyingDecl();
9124 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
9125 Results.insert(FK);
9126 }
9127 return Results;
9128 }
9129
9130 /// Check if we could call '.c_str()' on an object.
9131 ///
9132 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
9133 /// allow the call, or if it would be ambiguous).
9134 bool Sema::hasCStrMethod(const Expr *E) {
9135 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
9136
9137 MethodSet Results =
9138 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
9139 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
9140 MI != ME; ++MI)
9141 if ((*MI)->getMinRequiredArguments() == 0)
9142 return true;
9143 return false;
9144 }
9145
9146 // Check if a (w)string was passed when a (w)char* was needed, and offer a
9147 // better diagnostic if so. AT is assumed to be valid.
9148 // Returns true when a c_str() conversion method is found.
9149 bool CheckPrintfHandler::checkForCStrMembers(
9150 const analyze_printf::ArgType &AT, const Expr *E) {
9151 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
9152
9153 MethodSet Results =
9154 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
9155
9156 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
9157 MI != ME; ++MI) {
9158 const CXXMethodDecl *Method = *MI;
9159 if (Method->getMinRequiredArguments() == 0 &&
9160 AT.matchesType(S.Context, Method->getReturnType())) {
9161 // FIXME: Suggest parens if the expression needs them.
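// Illustrative example: given std::string S, a call such as printf("%s", S)
// gets a note suggesting printf("%s", S.c_str()), because S has a
// zero-argument c_str() whose return type matches the specifier.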
9162 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9163 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9164 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9165 return true; 9166 } 9167 } 9168 9169 return false; 9170 } 9171 9172 bool 9173 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 9174 &FS, 9175 const char *startSpecifier, 9176 unsigned specifierLen) { 9177 using namespace analyze_format_string; 9178 using namespace analyze_printf; 9179 9180 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9181 9182 if (FS.consumesDataArgument()) { 9183 if (atFirstArg) { 9184 atFirstArg = false; 9185 usesPositionalArgs = FS.usesPositionalArg(); 9186 } 9187 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9188 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9189 startSpecifier, specifierLen); 9190 return false; 9191 } 9192 } 9193 9194 // First check if the field width, precision, and conversion specifier 9195 // have matching data arguments. 9196 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9197 startSpecifier, specifierLen)) { 9198 return false; 9199 } 9200 9201 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9202 startSpecifier, specifierLen)) { 9203 return false; 9204 } 9205 9206 if (!CS.consumesDataArgument()) { 9207 // FIXME: Technically specifying a precision or field width here 9208 // makes no sense. Worth issuing a warning at some point. 9209 return true; 9210 } 9211 9212 // Consume the argument. 9213 unsigned argIndex = FS.getArgIndex(); 9214 if (argIndex < NumDataArgs) { 9215 // The check to see if the argIndex is valid will come later. 9216 // We set the bit here because we may exit early from this 9217 // function if we encounter some other error. 9218 CoveredArgs.set(argIndex); 9219 } 9220 9221 // FreeBSD kernel extensions. 9222 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9223 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9224 // We need at least two arguments. 9225 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9226 return false; 9227 9228 // Claim the second argument. 9229 CoveredArgs.set(argIndex + 1); 9230 9231 // Type check the first argument (int for %b, pointer for %D) 9232 const Expr *Ex = getDataArg(argIndex); 9233 const analyze_printf::ArgType &AT = 9234 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9235 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9236 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9237 EmitFormatDiagnostic( 9238 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9239 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9240 << false << Ex->getSourceRange(), 9241 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9242 getSpecifierRange(startSpecifier, specifierLen)); 9243 9244 // Type check the second argument (char * for both %b and %D) 9245 Ex = getDataArg(argIndex + 1); 9246 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9247 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9248 EmitFormatDiagnostic( 9249 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9250 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9251 << false << Ex->getSourceRange(), 9252 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9253 getSpecifierRange(startSpecifier, specifierLen)); 9254 9255 return true; 9256 } 9257 9258 // Check for using an Objective-C specific conversion specifier 9259 // in a non-ObjC literal. 
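// For example (illustrative): '%@' is accepted in NSString, os_log, and
// os_trace format strings, but in a plain printf format string it is reported
// as an invalid conversion specifier.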
9260 if (!allowsObjCArg() && CS.isObjCArg()) { 9261 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9262 specifierLen); 9263 } 9264 9265 // %P can only be used with os_log. 9266 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9267 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9268 specifierLen); 9269 } 9270 9271 // %n is not allowed with os_log. 9272 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9273 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9274 getLocationOfByte(CS.getStart()), 9275 /*IsStringLocation*/ false, 9276 getSpecifierRange(startSpecifier, specifierLen)); 9277 9278 return true; 9279 } 9280 9281 // Only scalars are allowed for os_trace. 9282 if (FSType == Sema::FST_OSTrace && 9283 (CS.getKind() == ConversionSpecifier::PArg || 9284 CS.getKind() == ConversionSpecifier::sArg || 9285 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9286 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9287 specifierLen); 9288 } 9289 9290 // Check for use of public/private annotation outside of os_log(). 9291 if (FSType != Sema::FST_OSLog) { 9292 if (FS.isPublic().isSet()) { 9293 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9294 << "public", 9295 getLocationOfByte(FS.isPublic().getPosition()), 9296 /*IsStringLocation*/ false, 9297 getSpecifierRange(startSpecifier, specifierLen)); 9298 } 9299 if (FS.isPrivate().isSet()) { 9300 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9301 << "private", 9302 getLocationOfByte(FS.isPrivate().getPosition()), 9303 /*IsStringLocation*/ false, 9304 getSpecifierRange(startSpecifier, specifierLen)); 9305 } 9306 } 9307 9308 // Check for invalid use of field width 9309 if (!FS.hasValidFieldWidth()) { 9310 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9311 startSpecifier, specifierLen); 9312 } 9313 9314 // Check for invalid use of precision 9315 if (!FS.hasValidPrecision()) { 9316 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9317 startSpecifier, specifierLen); 9318 } 9319 9320 // Precision is mandatory for %P specifier. 9321 if (CS.getKind() == ConversionSpecifier::PArg && 9322 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9323 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9324 getLocationOfByte(startSpecifier), 9325 /*IsStringLocation*/ false, 9326 getSpecifierRange(startSpecifier, specifierLen)); 9327 } 9328 9329 // Check each flag does not conflict with any other component. 
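// For example (illustrative): in "%#d" the '#' flag is diagnosed as
// nonsensical because the alternative form means nothing for 'd', and in
// "% +d" the ' ' flag is diagnosed as ignored because '+' takes precedence.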
9330 if (!FS.hasValidThousandsGroupingPrefix()) 9331 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9332 if (!FS.hasValidLeadingZeros()) 9333 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9334 if (!FS.hasValidPlusPrefix()) 9335 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9336 if (!FS.hasValidSpacePrefix()) 9337 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9338 if (!FS.hasValidAlternativeForm()) 9339 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9340 if (!FS.hasValidLeftJustified()) 9341 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9342 9343 // Check that flags are not ignored by another flag 9344 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9345 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9346 startSpecifier, specifierLen); 9347 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9348 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9349 startSpecifier, specifierLen); 9350 9351 // Check the length modifier is valid with the given conversion specifier. 9352 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9353 S.getLangOpts())) 9354 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9355 diag::warn_format_nonsensical_length); 9356 else if (!FS.hasStandardLengthModifier()) 9357 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9358 else if (!FS.hasStandardLengthConversionCombination()) 9359 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9360 diag::warn_format_non_standard_conversion_spec); 9361 9362 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9363 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9364 9365 // The remaining checks depend on the data arguments. 9366 if (HasVAListArg) 9367 return true; 9368 9369 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9370 return false; 9371 9372 const Expr *Arg = getDataArg(argIndex); 9373 if (!Arg) 9374 return true; 9375 9376 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9377 } 9378 9379 static bool requiresParensToAddCast(const Expr *E) { 9380 // FIXME: We should have a general way to reason about operator 9381 // precedence and whether parens are actually needed here. 9382 // Take care of a few common cases where they aren't. 
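// For example (illustrative): a bare DeclRefExpr such as 'len' can be
// prefixed with '(size_t)' directly, whereas 'a + b' would have to become
// '(size_t)(a + b)', so it falls into the default case and gets parentheses.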
9383 const Expr *Inside = E->IgnoreImpCasts(); 9384 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9385 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9386 9387 switch (Inside->getStmtClass()) { 9388 case Stmt::ArraySubscriptExprClass: 9389 case Stmt::CallExprClass: 9390 case Stmt::CharacterLiteralClass: 9391 case Stmt::CXXBoolLiteralExprClass: 9392 case Stmt::DeclRefExprClass: 9393 case Stmt::FloatingLiteralClass: 9394 case Stmt::IntegerLiteralClass: 9395 case Stmt::MemberExprClass: 9396 case Stmt::ObjCArrayLiteralClass: 9397 case Stmt::ObjCBoolLiteralExprClass: 9398 case Stmt::ObjCBoxedExprClass: 9399 case Stmt::ObjCDictionaryLiteralClass: 9400 case Stmt::ObjCEncodeExprClass: 9401 case Stmt::ObjCIvarRefExprClass: 9402 case Stmt::ObjCMessageExprClass: 9403 case Stmt::ObjCPropertyRefExprClass: 9404 case Stmt::ObjCStringLiteralClass: 9405 case Stmt::ObjCSubscriptRefExprClass: 9406 case Stmt::ParenExprClass: 9407 case Stmt::StringLiteralClass: 9408 case Stmt::UnaryOperatorClass: 9409 return false; 9410 default: 9411 return true; 9412 } 9413 } 9414 9415 static std::pair<QualType, StringRef> 9416 shouldNotPrintDirectly(const ASTContext &Context, 9417 QualType IntendedTy, 9418 const Expr *E) { 9419 // Use a 'while' to peel off layers of typedefs. 9420 QualType TyTy = IntendedTy; 9421 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9422 StringRef Name = UserTy->getDecl()->getName(); 9423 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9424 .Case("CFIndex", Context.getNSIntegerType()) 9425 .Case("NSInteger", Context.getNSIntegerType()) 9426 .Case("NSUInteger", Context.getNSUIntegerType()) 9427 .Case("SInt32", Context.IntTy) 9428 .Case("UInt32", Context.UnsignedIntTy) 9429 .Default(QualType()); 9430 9431 if (!CastTy.isNull()) 9432 return std::make_pair(CastTy, Name); 9433 9434 TyTy = UserTy->desugar(); 9435 } 9436 9437 // Strip parens if necessary. 9438 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9439 return shouldNotPrintDirectly(Context, 9440 PE->getSubExpr()->getType(), 9441 PE->getSubExpr()); 9442 9443 // If this is a conditional expression, then its result type is constructed 9444 // via usual arithmetic conversions and thus there might be no necessary 9445 // typedef sugar there. Recurse to operands to check for NSInteger & 9446 // Co. usage condition. 9447 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9448 QualType TrueTy, FalseTy; 9449 StringRef TrueName, FalseName; 9450 9451 std::tie(TrueTy, TrueName) = 9452 shouldNotPrintDirectly(Context, 9453 CO->getTrueExpr()->getType(), 9454 CO->getTrueExpr()); 9455 std::tie(FalseTy, FalseName) = 9456 shouldNotPrintDirectly(Context, 9457 CO->getFalseExpr()->getType(), 9458 CO->getFalseExpr()); 9459 9460 if (TrueTy == FalseTy) 9461 return std::make_pair(TrueTy, TrueName); 9462 else if (TrueTy.isNull()) 9463 return std::make_pair(FalseTy, FalseName); 9464 else if (FalseTy.isNull()) 9465 return std::make_pair(TrueTy, TrueName); 9466 } 9467 9468 return std::make_pair(QualType(), StringRef()); 9469 } 9470 9471 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9472 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9473 /// type do not count. 9474 static bool 9475 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9476 QualType From = ICE->getSubExpr()->getType(); 9477 QualType To = ICE->getType(); 9478 // It's an integer promotion if the destination type is the promoted 9479 // source type. 
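// For example (illustrative): a 'short' argument passed through the ellipsis
// arrives as an 'int' (CK_IntegralCast) and a 'float' arrives as a 'double'
// (CK_FloatingCast); both count as argument promotions here.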
9480 if (ICE->getCastKind() == CK_IntegralCast && 9481 From->isPromotableIntegerType() && 9482 S.Context.getPromotedIntegerType(From) == To) 9483 return true; 9484 // Look through vector types, since we do default argument promotion for 9485 // those in OpenCL. 9486 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9487 From = VecTy->getElementType(); 9488 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9489 To = VecTy->getElementType(); 9490 // It's a floating promotion if the source type is a lower rank. 9491 return ICE->getCastKind() == CK_FloatingCast && 9492 S.Context.getFloatingTypeOrder(From, To) < 0; 9493 } 9494 9495 bool 9496 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9497 const char *StartSpecifier, 9498 unsigned SpecifierLen, 9499 const Expr *E) { 9500 using namespace analyze_format_string; 9501 using namespace analyze_printf; 9502 9503 // Now type check the data expression that matches the 9504 // format specifier. 9505 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9506 if (!AT.isValid()) 9507 return true; 9508 9509 QualType ExprTy = E->getType(); 9510 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9511 ExprTy = TET->getUnderlyingExpr()->getType(); 9512 } 9513 9514 // Diagnose attempts to print a boolean value as a character. Unlike other 9515 // -Wformat diagnostics, this is fine from a type perspective, but it still 9516 // doesn't make sense. 9517 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9518 E->isKnownToHaveBooleanValue()) { 9519 const CharSourceRange &CSR = 9520 getSpecifierRange(StartSpecifier, SpecifierLen); 9521 SmallString<4> FSString; 9522 llvm::raw_svector_ostream os(FSString); 9523 FS.toString(os); 9524 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9525 << FSString, 9526 E->getExprLoc(), false, CSR); 9527 return true; 9528 } 9529 9530 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9531 if (Match == analyze_printf::ArgType::Match) 9532 return true; 9533 9534 // Look through argument promotions for our error message's reported type. 9535 // This includes the integral and floating promotions, but excludes array 9536 // and function pointer decay (seeing that an argument intended to be a 9537 // string has type 'char [6]' is probably more confusing than 'char *') and 9538 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9539 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9540 if (isArithmeticArgumentPromotion(S, ICE)) { 9541 E = ICE->getSubExpr(); 9542 ExprTy = E->getType(); 9543 9544 // Check if we didn't match because of an implicit cast from a 'char' 9545 // or 'short' to an 'int'. This is done because printf is a varargs 9546 // function. 9547 if (ICE->getType() == S.Context.IntTy || 9548 ICE->getType() == S.Context.UnsignedIntTy) { 9549 // All further checking is done on the subexpression 9550 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9551 AT.matchesType(S.Context, ExprTy); 9552 if (ImplicitMatch == analyze_printf::ArgType::Match) 9553 return true; 9554 if (ImplicitMatch == ArgType::NoMatchPedantic || 9555 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9556 Match = ImplicitMatch; 9557 } 9558 } 9559 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 9560 // Special case for 'a', which has type 'int' in C. 
9561 // Note, however, that we do /not/ want to treat multibyte constants like 9562 // 'MooV' as characters! This form is deprecated but still exists. In 9563 // addition, don't treat expressions as of type 'char' if one byte length 9564 // modifier is provided. 9565 if (ExprTy == S.Context.IntTy && 9566 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 9567 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 9568 ExprTy = S.Context.CharTy; 9569 } 9570 9571 // Look through enums to their underlying type. 9572 bool IsEnum = false; 9573 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 9574 ExprTy = EnumTy->getDecl()->getIntegerType(); 9575 IsEnum = true; 9576 } 9577 9578 // %C in an Objective-C context prints a unichar, not a wchar_t. 9579 // If the argument is an integer of some kind, believe the %C and suggest 9580 // a cast instead of changing the conversion specifier. 9581 QualType IntendedTy = ExprTy; 9582 if (isObjCContext() && 9583 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 9584 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 9585 !ExprTy->isCharType()) { 9586 // 'unichar' is defined as a typedef of unsigned short, but we should 9587 // prefer using the typedef if it is visible. 9588 IntendedTy = S.Context.UnsignedShortTy; 9589 9590 // While we are here, check if the value is an IntegerLiteral that happens 9591 // to be within the valid range. 9592 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 9593 const llvm::APInt &V = IL->getValue(); 9594 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 9595 return true; 9596 } 9597 9598 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 9599 Sema::LookupOrdinaryName); 9600 if (S.LookupName(Result, S.getCurScope())) { 9601 NamedDecl *ND = Result.getFoundDecl(); 9602 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 9603 if (TD->getUnderlyingType() == IntendedTy) 9604 IntendedTy = S.Context.getTypedefType(TD); 9605 } 9606 } 9607 } 9608 9609 // Special-case some of Darwin's platform-independence types by suggesting 9610 // casts to primitive types that are known to be large enough. 9611 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 9612 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 9613 QualType CastTy; 9614 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 9615 if (!CastTy.isNull()) { 9616 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 9617 // (long in ASTContext). Only complain to pedants. 9618 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 9619 (AT.isSizeT() || AT.isPtrdiffT()) && 9620 AT.matchesType(S.Context, CastTy)) 9621 Match = ArgType::NoMatchPedantic; 9622 IntendedTy = CastTy; 9623 ShouldNotPrintDirectly = true; 9624 } 9625 } 9626 9627 // We may be able to offer a FixItHint if it is a supported type. 
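// For example (illustrative): printf("%d", someLong) can be fixed by
// rewriting the specifier to "%ld", whereas the Darwin typedef cases handled
// above are instead fixed by suggesting a cast on the argument.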
9628 PrintfSpecifier fixedFS = FS; 9629 bool Success = 9630 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 9631 9632 if (Success) { 9633 // Get the fix string from the fixed format specifier 9634 SmallString<16> buf; 9635 llvm::raw_svector_ostream os(buf); 9636 fixedFS.toString(os); 9637 9638 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 9639 9640 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 9641 unsigned Diag; 9642 switch (Match) { 9643 case ArgType::Match: llvm_unreachable("expected non-matching"); 9644 case ArgType::NoMatchPedantic: 9645 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9646 break; 9647 case ArgType::NoMatchTypeConfusion: 9648 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9649 break; 9650 case ArgType::NoMatch: 9651 Diag = diag::warn_format_conversion_argument_type_mismatch; 9652 break; 9653 } 9654 9655 // In this case, the specifier is wrong and should be changed to match 9656 // the argument. 9657 EmitFormatDiagnostic(S.PDiag(Diag) 9658 << AT.getRepresentativeTypeName(S.Context) 9659 << IntendedTy << IsEnum << E->getSourceRange(), 9660 E->getBeginLoc(), 9661 /*IsStringLocation*/ false, SpecRange, 9662 FixItHint::CreateReplacement(SpecRange, os.str())); 9663 } else { 9664 // The canonical type for formatting this value is different from the 9665 // actual type of the expression. (This occurs, for example, with Darwin's 9666 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 9667 // should be printed as 'long' for 64-bit compatibility.) 9668 // Rather than emitting a normal format/argument mismatch, we want to 9669 // add a cast to the recommended type (and correct the format string 9670 // if necessary). 9671 SmallString<16> CastBuf; 9672 llvm::raw_svector_ostream CastFix(CastBuf); 9673 CastFix << "("; 9674 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 9675 CastFix << ")"; 9676 9677 SmallVector<FixItHint,4> Hints; 9678 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 9679 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 9680 9681 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 9682 // If there's already a cast present, just replace it. 9683 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 9684 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 9685 9686 } else if (!requiresParensToAddCast(E)) { 9687 // If the expression has high enough precedence, 9688 // just write the C-style cast. 9689 Hints.push_back( 9690 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9691 } else { 9692 // Otherwise, add parens around the expression as well as the cast. 9693 CastFix << "("; 9694 Hints.push_back( 9695 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9696 9697 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 9698 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 9699 } 9700 9701 if (ShouldNotPrintDirectly) { 9702 // The expression has a type that should not be printed directly. 9703 // We extract the name from the typedef because we don't want to show 9704 // the underlying type in the diagnostic. 9705 StringRef Name; 9706 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 9707 Name = TypedefTy->getDecl()->getName(); 9708 else 9709 Name = CastTyName; 9710 unsigned Diag = Match == ArgType::NoMatchPedantic 9711 ? 
diag::warn_format_argument_needs_cast_pedantic 9712 : diag::warn_format_argument_needs_cast; 9713 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 9714 << E->getSourceRange(), 9715 E->getBeginLoc(), /*IsStringLocation=*/false, 9716 SpecRange, Hints); 9717 } else { 9718 // In this case, the expression could be printed using a different 9719 // specifier, but we've decided that the specifier is probably correct 9720 // and we should cast instead. Just use the normal warning message. 9721 EmitFormatDiagnostic( 9722 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9723 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 9724 << E->getSourceRange(), 9725 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 9726 } 9727 } 9728 } else { 9729 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 9730 SpecifierLen); 9731 // Since the warning for passing non-POD types to variadic functions 9732 // was deferred until now, we emit a warning for non-POD 9733 // arguments here. 9734 switch (S.isValidVarArgType(ExprTy)) { 9735 case Sema::VAK_Valid: 9736 case Sema::VAK_ValidInCXX11: { 9737 unsigned Diag; 9738 switch (Match) { 9739 case ArgType::Match: llvm_unreachable("expected non-matching"); 9740 case ArgType::NoMatchPedantic: 9741 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9742 break; 9743 case ArgType::NoMatchTypeConfusion: 9744 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9745 break; 9746 case ArgType::NoMatch: 9747 Diag = diag::warn_format_conversion_argument_type_mismatch; 9748 break; 9749 } 9750 9751 EmitFormatDiagnostic( 9752 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9753 << IsEnum << CSR << E->getSourceRange(), 9754 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9755 break; 9756 } 9757 case Sema::VAK_Undefined: 9758 case Sema::VAK_MSVCUndefined: 9759 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9760 << S.getLangOpts().CPlusPlus11 << ExprTy 9761 << CallType 9762 << AT.getRepresentativeTypeName(S.Context) << CSR 9763 << E->getSourceRange(), 9764 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9765 checkForCStrMembers(AT, E); 9766 break; 9767 9768 case Sema::VAK_Invalid: 9769 if (ExprTy->isObjCObjectType()) 9770 EmitFormatDiagnostic( 9771 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9772 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9773 << AT.getRepresentativeTypeName(S.Context) << CSR 9774 << E->getSourceRange(), 9775 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9776 else 9777 // FIXME: If this is an initializer list, suggest removing the braces 9778 // or inserting a cast to the target type. 
9779 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
9780 << isa<InitListExpr>(E) << ExprTy << CallType
9781 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
9782 break;
9783 }
9784
9785 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
9786 "format string specifier index out of range");
9787 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
9788 }
9789
9790 return true;
9791 }
9792
9793 //===--- CHECK: Scanf format string checking ------------------------------===//
9794
9795 namespace {
9796
9797 class CheckScanfHandler : public CheckFormatHandler {
9798 public:
9799 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
9800 const Expr *origFormatExpr, Sema::FormatStringType type,
9801 unsigned firstDataArg, unsigned numDataArgs,
9802 const char *beg, bool hasVAListArg,
9803 ArrayRef<const Expr *> Args, unsigned formatIdx,
9804 bool inFunctionCall, Sema::VariadicCallType CallType,
9805 llvm::SmallBitVector &CheckedVarArgs,
9806 UncoveredArgHandler &UncoveredArg)
9807 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
9808 numDataArgs, beg, hasVAListArg, Args, formatIdx,
9809 inFunctionCall, CallType, CheckedVarArgs,
9810 UncoveredArg) {}
9811
9812 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
9813 const char *startSpecifier,
9814 unsigned specifierLen) override;
9815
9816 bool HandleInvalidScanfConversionSpecifier(
9817 const analyze_scanf::ScanfSpecifier &FS,
9818 const char *startSpecifier,
9819 unsigned specifierLen) override;
9820
9821 void HandleIncompleteScanList(const char *start, const char *end) override;
9822 };
9823
9824 } // namespace
9825
9826 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
9827 const char *end) {
9828 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
9829 getLocationOfByte(end), /*IsStringLocation*/true,
9830 getSpecifierRange(start, end - start));
9831 }
9832
9833 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
9834 const analyze_scanf::ScanfSpecifier &FS,
9835 const char *startSpecifier,
9836 unsigned specifierLen) {
9837 const analyze_scanf::ScanfConversionSpecifier &CS =
9838 FS.getConversionSpecifier();
9839
9840 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
9841 getLocationOfByte(CS.getStart()),
9842 startSpecifier, specifierLen,
9843 CS.getStart(), CS.getLength());
9844 }
9845
9846 bool CheckScanfHandler::HandleScanfSpecifier(
9847 const analyze_scanf::ScanfSpecifier &FS,
9848 const char *startSpecifier,
9849 unsigned specifierLen) {
9850 using namespace analyze_scanf;
9851 using namespace analyze_format_string;
9852
9853 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
9854
9855 // Handle case where '%' and '*' don't consume an argument. These shouldn't
9856 // be used to decide if we are using positional arguments consistently.
9857 if (FS.consumesDataArgument()) {
9858 if (atFirstArg) {
9859 atFirstArg = false;
9860 usesPositionalArgs = FS.usesPositionalArg();
9861 }
9862 else if (usesPositionalArgs != FS.usesPositionalArg()) {
9863 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
9864 startSpecifier, specifierLen);
9865 return false;
9866 }
9867 }
9868
9869 // Check if the field width is non-zero.
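// For example (illustrative): the '0' in scanf("%0d", &i) is diagnosed as a
// meaningless zero field width, and the fix-it simply removes it.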
9870 const OptionalAmount &Amt = FS.getFieldWidth(); 9871 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9872 if (Amt.getConstantAmount() == 0) { 9873 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9874 Amt.getConstantLength()); 9875 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9876 getLocationOfByte(Amt.getStart()), 9877 /*IsStringLocation*/true, R, 9878 FixItHint::CreateRemoval(R)); 9879 } 9880 } 9881 9882 if (!FS.consumesDataArgument()) { 9883 // FIXME: Technically specifying a precision or field width here 9884 // makes no sense. Worth issuing a warning at some point. 9885 return true; 9886 } 9887 9888 // Consume the argument. 9889 unsigned argIndex = FS.getArgIndex(); 9890 if (argIndex < NumDataArgs) { 9891 // The check to see if the argIndex is valid will come later. 9892 // We set the bit here because we may exit early from this 9893 // function if we encounter some other error. 9894 CoveredArgs.set(argIndex); 9895 } 9896 9897 // Check the length modifier is valid with the given conversion specifier. 9898 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9899 S.getLangOpts())) 9900 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9901 diag::warn_format_nonsensical_length); 9902 else if (!FS.hasStandardLengthModifier()) 9903 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9904 else if (!FS.hasStandardLengthConversionCombination()) 9905 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9906 diag::warn_format_non_standard_conversion_spec); 9907 9908 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9909 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9910 9911 // The remaining checks depend on the data arguments. 9912 if (HasVAListArg) 9913 return true; 9914 9915 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9916 return false; 9917 9918 // Check that the argument type matches the format specifier. 9919 const Expr *Ex = getDataArg(argIndex); 9920 if (!Ex) 9921 return true; 9922 9923 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9924 9925 if (!AT.isValid()) { 9926 return true; 9927 } 9928 9929 analyze_format_string::ArgType::MatchKind Match = 9930 AT.matchesType(S.Context, Ex->getType()); 9931 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9932 if (Match == analyze_format_string::ArgType::Match) 9933 return true; 9934 9935 ScanfSpecifier fixedFS = FS; 9936 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9937 S.getLangOpts(), S.Context); 9938 9939 unsigned Diag = 9940 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9941 : diag::warn_format_conversion_argument_type_mismatch; 9942 9943 if (Success) { 9944 // Get the fix string from the fixed format specifier. 
9945 SmallString<128> buf; 9946 llvm::raw_svector_ostream os(buf); 9947 fixedFS.toString(os); 9948 9949 EmitFormatDiagnostic( 9950 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9951 << Ex->getType() << false << Ex->getSourceRange(), 9952 Ex->getBeginLoc(), 9953 /*IsStringLocation*/ false, 9954 getSpecifierRange(startSpecifier, specifierLen), 9955 FixItHint::CreateReplacement( 9956 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9957 } else { 9958 EmitFormatDiagnostic(S.PDiag(Diag) 9959 << AT.getRepresentativeTypeName(S.Context) 9960 << Ex->getType() << false << Ex->getSourceRange(), 9961 Ex->getBeginLoc(), 9962 /*IsStringLocation*/ false, 9963 getSpecifierRange(startSpecifier, specifierLen)); 9964 } 9965 9966 return true; 9967 } 9968 9969 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9970 const Expr *OrigFormatExpr, 9971 ArrayRef<const Expr *> Args, 9972 bool HasVAListArg, unsigned format_idx, 9973 unsigned firstDataArg, 9974 Sema::FormatStringType Type, 9975 bool inFunctionCall, 9976 Sema::VariadicCallType CallType, 9977 llvm::SmallBitVector &CheckedVarArgs, 9978 UncoveredArgHandler &UncoveredArg, 9979 bool IgnoreStringsWithoutSpecifiers) { 9980 // CHECK: is the format string a wide literal? 9981 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 9982 CheckFormatHandler::EmitFormatDiagnostic( 9983 S, inFunctionCall, Args[format_idx], 9984 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 9985 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9986 return; 9987 } 9988 9989 // Str - The format string. NOTE: this is NOT null-terminated! 9990 StringRef StrRef = FExpr->getString(); 9991 const char *Str = StrRef.data(); 9992 // Account for cases where the string literal is truncated in a declaration. 9993 const ConstantArrayType *T = 9994 S.Context.getAsConstantArrayType(FExpr->getType()); 9995 assert(T && "String literal not of constant array type!"); 9996 size_t TypeSize = T->getSize().getZExtValue(); 9997 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9998 const unsigned numDataArgs = Args.size() - firstDataArg; 9999 10000 if (IgnoreStringsWithoutSpecifiers && 10001 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10002 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10003 return; 10004 10005 // Emit a warning if the string literal is truncated and does not contain an 10006 // embedded null character. 10007 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10008 CheckFormatHandler::EmitFormatDiagnostic( 10009 S, inFunctionCall, Args[format_idx], 10010 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10011 FExpr->getBeginLoc(), 10012 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10013 return; 10014 } 10015 10016 // CHECK: empty format string? 
10017 if (StrLen == 0 && numDataArgs > 0) {
10018 CheckFormatHandler::EmitFormatDiagnostic(
10019 S, inFunctionCall, Args[format_idx],
10020 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
10021 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
10022 return;
10023 }
10024
10025 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
10026 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
10027 Type == Sema::FST_OSTrace) {
10028 CheckPrintfHandler H(
10029 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
10030 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
10031 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
10032 CheckedVarArgs, UncoveredArg);
10033
10034 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
10035 S.getLangOpts(),
10036 S.Context.getTargetInfo(),
10037 Type == Sema::FST_FreeBSDKPrintf))
10038 H.DoneProcessing();
10039 } else if (Type == Sema::FST_Scanf) {
10040 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
10041 numDataArgs, Str, HasVAListArg, Args, format_idx,
10042 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
10043
10044 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
10045 S.getLangOpts(),
10046 S.Context.getTargetInfo()))
10047 H.DoneProcessing();
10048 } // TODO: handle other formats
10049 }
10050
10051 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
10052 // Str - The format string. NOTE: this is NOT null-terminated!
10053 StringRef StrRef = FExpr->getString();
10054 const char *Str = StrRef.data();
10055 // Account for cases where the string literal is truncated in a declaration.
10056 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
10057 assert(T && "String literal not of constant array type!");
10058 size_t TypeSize = T->getSize().getZExtValue();
10059 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
10060 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
10061 getLangOpts(),
10062 Context.getTargetInfo());
10063 }
10064
10065 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
10066
10067 // Returns the related absolute value function that is larger, or 0 if one
10068 // does not exist.
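// For example, abs() maps to labs(), labs() to llabs(), and fabsf() to
// fabs(); llabs(), fabsl(), and cabsl() are already the largest members of
// their families, so they map to 0.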
10069 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10070 switch (AbsFunction) { 10071 default: 10072 return 0; 10073 10074 case Builtin::BI__builtin_abs: 10075 return Builtin::BI__builtin_labs; 10076 case Builtin::BI__builtin_labs: 10077 return Builtin::BI__builtin_llabs; 10078 case Builtin::BI__builtin_llabs: 10079 return 0; 10080 10081 case Builtin::BI__builtin_fabsf: 10082 return Builtin::BI__builtin_fabs; 10083 case Builtin::BI__builtin_fabs: 10084 return Builtin::BI__builtin_fabsl; 10085 case Builtin::BI__builtin_fabsl: 10086 return 0; 10087 10088 case Builtin::BI__builtin_cabsf: 10089 return Builtin::BI__builtin_cabs; 10090 case Builtin::BI__builtin_cabs: 10091 return Builtin::BI__builtin_cabsl; 10092 case Builtin::BI__builtin_cabsl: 10093 return 0; 10094 10095 case Builtin::BIabs: 10096 return Builtin::BIlabs; 10097 case Builtin::BIlabs: 10098 return Builtin::BIllabs; 10099 case Builtin::BIllabs: 10100 return 0; 10101 10102 case Builtin::BIfabsf: 10103 return Builtin::BIfabs; 10104 case Builtin::BIfabs: 10105 return Builtin::BIfabsl; 10106 case Builtin::BIfabsl: 10107 return 0; 10108 10109 case Builtin::BIcabsf: 10110 return Builtin::BIcabs; 10111 case Builtin::BIcabs: 10112 return Builtin::BIcabsl; 10113 case Builtin::BIcabsl: 10114 return 0; 10115 } 10116 } 10117 10118 // Returns the argument type of the absolute value function. 10119 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10120 unsigned AbsType) { 10121 if (AbsType == 0) 10122 return QualType(); 10123 10124 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10125 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10126 if (Error != ASTContext::GE_None) 10127 return QualType(); 10128 10129 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10130 if (!FT) 10131 return QualType(); 10132 10133 if (FT->getNumParams() != 1) 10134 return QualType(); 10135 10136 return FT->getParamType(0); 10137 } 10138 10139 // Returns the best absolute value function, or zero, based on type and 10140 // current absolute value function. 10141 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10142 unsigned AbsFunctionKind) { 10143 unsigned BestKind = 0; 10144 uint64_t ArgSize = Context.getTypeSize(ArgType); 10145 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10146 Kind = getLargerAbsoluteValueFunction(Kind)) { 10147 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10148 if (Context.getTypeSize(ParamType) >= ArgSize) { 10149 if (BestKind == 0) 10150 BestKind = Kind; 10151 else if (Context.hasSameType(ParamType, ArgType)) { 10152 BestKind = Kind; 10153 break; 10154 } 10155 } 10156 } 10157 return BestKind; 10158 } 10159 10160 enum AbsoluteValueKind { 10161 AVK_Integer, 10162 AVK_Floating, 10163 AVK_Complex 10164 }; 10165 10166 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10167 if (T->isIntegralOrEnumerationType()) 10168 return AVK_Integer; 10169 if (T->isRealFloatingType()) 10170 return AVK_Floating; 10171 if (T->isAnyComplexType()) 10172 return AVK_Complex; 10173 10174 llvm_unreachable("Type not integer, floating, or complex"); 10175 } 10176 10177 // Changes the absolute value function to a different type. Preserves whether 10178 // the function is a builtin. 
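// For example, fabs() applied to an integer argument maps to abs(), and
// __builtin_abs() applied to a floating-point argument maps to
// __builtin_fabsf(); getBestAbsFunction() then picks the right size.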
10179 static unsigned changeAbsFunction(unsigned AbsKind, 10180 AbsoluteValueKind ValueKind) { 10181 switch (ValueKind) { 10182 case AVK_Integer: 10183 switch (AbsKind) { 10184 default: 10185 return 0; 10186 case Builtin::BI__builtin_fabsf: 10187 case Builtin::BI__builtin_fabs: 10188 case Builtin::BI__builtin_fabsl: 10189 case Builtin::BI__builtin_cabsf: 10190 case Builtin::BI__builtin_cabs: 10191 case Builtin::BI__builtin_cabsl: 10192 return Builtin::BI__builtin_abs; 10193 case Builtin::BIfabsf: 10194 case Builtin::BIfabs: 10195 case Builtin::BIfabsl: 10196 case Builtin::BIcabsf: 10197 case Builtin::BIcabs: 10198 case Builtin::BIcabsl: 10199 return Builtin::BIabs; 10200 } 10201 case AVK_Floating: 10202 switch (AbsKind) { 10203 default: 10204 return 0; 10205 case Builtin::BI__builtin_abs: 10206 case Builtin::BI__builtin_labs: 10207 case Builtin::BI__builtin_llabs: 10208 case Builtin::BI__builtin_cabsf: 10209 case Builtin::BI__builtin_cabs: 10210 case Builtin::BI__builtin_cabsl: 10211 return Builtin::BI__builtin_fabsf; 10212 case Builtin::BIabs: 10213 case Builtin::BIlabs: 10214 case Builtin::BIllabs: 10215 case Builtin::BIcabsf: 10216 case Builtin::BIcabs: 10217 case Builtin::BIcabsl: 10218 return Builtin::BIfabsf; 10219 } 10220 case AVK_Complex: 10221 switch (AbsKind) { 10222 default: 10223 return 0; 10224 case Builtin::BI__builtin_abs: 10225 case Builtin::BI__builtin_labs: 10226 case Builtin::BI__builtin_llabs: 10227 case Builtin::BI__builtin_fabsf: 10228 case Builtin::BI__builtin_fabs: 10229 case Builtin::BI__builtin_fabsl: 10230 return Builtin::BI__builtin_cabsf; 10231 case Builtin::BIabs: 10232 case Builtin::BIlabs: 10233 case Builtin::BIllabs: 10234 case Builtin::BIfabsf: 10235 case Builtin::BIfabs: 10236 case Builtin::BIfabsl: 10237 return Builtin::BIcabsf; 10238 } 10239 } 10240 llvm_unreachable("Unable to convert function"); 10241 } 10242 10243 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10244 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10245 if (!FnInfo) 10246 return 0; 10247 10248 switch (FDecl->getBuiltinID()) { 10249 default: 10250 return 0; 10251 case Builtin::BI__builtin_abs: 10252 case Builtin::BI__builtin_fabs: 10253 case Builtin::BI__builtin_fabsf: 10254 case Builtin::BI__builtin_fabsl: 10255 case Builtin::BI__builtin_labs: 10256 case Builtin::BI__builtin_llabs: 10257 case Builtin::BI__builtin_cabs: 10258 case Builtin::BI__builtin_cabsf: 10259 case Builtin::BI__builtin_cabsl: 10260 case Builtin::BIabs: 10261 case Builtin::BIlabs: 10262 case Builtin::BIllabs: 10263 case Builtin::BIfabs: 10264 case Builtin::BIfabsf: 10265 case Builtin::BIfabsl: 10266 case Builtin::BIcabs: 10267 case Builtin::BIcabsf: 10268 case Builtin::BIcabsl: 10269 return FDecl->getBuiltinID(); 10270 } 10271 llvm_unreachable("Unknown Builtin type"); 10272 } 10273 10274 // If the replacement is valid, emit a note with replacement function. 10275 // Additionally, suggest including the proper header if not already included. 
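// For example, in C++ the note suggests std::abs and, if no suitable
// declaration of std::abs is visible, also suggests including <cstdlib> (for
// integer arguments) or <cmath> (for floating-point arguments).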
10276 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10277 unsigned AbsKind, QualType ArgType) { 10278 bool EmitHeaderHint = true; 10279 const char *HeaderName = nullptr; 10280 const char *FunctionName = nullptr; 10281 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10282 FunctionName = "std::abs"; 10283 if (ArgType->isIntegralOrEnumerationType()) { 10284 HeaderName = "cstdlib"; 10285 } else if (ArgType->isRealFloatingType()) { 10286 HeaderName = "cmath"; 10287 } else { 10288 llvm_unreachable("Invalid Type"); 10289 } 10290 10291 // Lookup all std::abs 10292 if (NamespaceDecl *Std = S.getStdNamespace()) { 10293 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10294 R.suppressDiagnostics(); 10295 S.LookupQualifiedName(R, Std); 10296 10297 for (const auto *I : R) { 10298 const FunctionDecl *FDecl = nullptr; 10299 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10300 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10301 } else { 10302 FDecl = dyn_cast<FunctionDecl>(I); 10303 } 10304 if (!FDecl) 10305 continue; 10306 10307 // Found std::abs(), check that they are the right ones. 10308 if (FDecl->getNumParams() != 1) 10309 continue; 10310 10311 // Check that the parameter type can handle the argument. 10312 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10313 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10314 S.Context.getTypeSize(ArgType) <= 10315 S.Context.getTypeSize(ParamType)) { 10316 // Found a function, don't need the header hint. 10317 EmitHeaderHint = false; 10318 break; 10319 } 10320 } 10321 } 10322 } else { 10323 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10324 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10325 10326 if (HeaderName) { 10327 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10328 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10329 R.suppressDiagnostics(); 10330 S.LookupName(R, S.getCurScope()); 10331 10332 if (R.isSingleResult()) { 10333 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10334 if (FD && FD->getBuiltinID() == AbsKind) { 10335 EmitHeaderHint = false; 10336 } else { 10337 return; 10338 } 10339 } else if (!R.empty()) { 10340 return; 10341 } 10342 } 10343 } 10344 10345 S.Diag(Loc, diag::note_replace_abs_function) 10346 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10347 10348 if (!HeaderName) 10349 return; 10350 10351 if (!EmitHeaderHint) 10352 return; 10353 10354 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10355 << FunctionName; 10356 } 10357 10358 template <std::size_t StrLen> 10359 static bool IsStdFunction(const FunctionDecl *FDecl, 10360 const char (&Str)[StrLen]) { 10361 if (!FDecl) 10362 return false; 10363 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10364 return false; 10365 if (!FDecl->isInStdNamespace()) 10366 return false; 10367 10368 return true; 10369 } 10370 10371 // Warn when using the wrong abs() function. 
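// For example, abs() applied to a 'long long' argument is too small
// (warn_abs_too_small), and abs() applied to a 'double' is the wrong family
// entirely (warn_wrong_absolute_value_type); both cases get a replacement
// suggestion via emitReplacement().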
10372 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
10373 const FunctionDecl *FDecl) {
10374 if (Call->getNumArgs() != 1)
10375 return;
10376
10377 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
10378 bool IsStdAbs = IsStdFunction(FDecl, "abs");
10379 if (AbsKind == 0 && !IsStdAbs)
10380 return;
10381
10382 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
10383 QualType ParamType = Call->getArg(0)->getType();
10384
10385 // Unsigned types cannot be negative. Suggest removing the absolute value
10386 // function call.
10387 if (ArgType->isUnsignedIntegerType()) {
10388 const char *FunctionName =
10389 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
10390 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
10391 Diag(Call->getExprLoc(), diag::note_remove_abs)
10392 << FunctionName
10393 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
10394 return;
10395 }
10396
10397 // Taking the absolute value of a pointer is very suspicious; they probably
10398 // wanted to index into an array, dereference a pointer, call a function, etc.
10399 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
10400 unsigned DiagType = 0;
10401 if (ArgType->isFunctionType())
10402 DiagType = 1;
10403 else if (ArgType->isArrayType())
10404 DiagType = 2;
10405
10406 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
10407 return;
10408 }
10409
10410 // std::abs has overloads which prevent most of the absolute value problems
10411 // from occurring.
10412 if (IsStdAbs)
10413 return;
10414
10415 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
10416 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
10417
10418 // The argument and parameter are the same kind. Check if they are the right
10419 // size.
10420 if (ArgValueKind == ParamValueKind) {
10421 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
10422 return;
10423
10424 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
10425 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
10426 << FDecl << ArgType << ParamType;
10427
10428 if (NewAbsKind == 0)
10429 return;
10430
10431 emitReplacement(*this, Call->getExprLoc(),
10432 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10433 return;
10434 }
10435
10436 // ArgValueKind != ParamValueKind
10437 // The wrong type of absolute value function was used. Attempt to find the
10438 // proper one.
10439 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
10440 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
10441 if (NewAbsKind == 0)
10442 return;
10443
10444 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
10445 << FDecl << ParamValueKind << ArgValueKind;
10446
10447 emitReplacement(*this, Call->getExprLoc(),
10448 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10449 }
10450
10451 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
10452 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
10453 const FunctionDecl *FDecl) {
10454 if (!Call || !FDecl) return;
10455
10456 // Ignore template specializations and macros.
10457 if (inTemplateInstantiation()) return; 10458 if (Call->getExprLoc().isMacroID()) return; 10459 10460 // Only care about the one template argument, two function parameter std::max 10461 if (Call->getNumArgs() != 2) return; 10462 if (!IsStdFunction(FDecl, "max")) return; 10463 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10464 if (!ArgList) return; 10465 if (ArgList->size() != 1) return; 10466 10467 // Check that template type argument is unsigned integer. 10468 const auto& TA = ArgList->get(0); 10469 if (TA.getKind() != TemplateArgument::Type) return; 10470 QualType ArgType = TA.getAsType(); 10471 if (!ArgType->isUnsignedIntegerType()) return; 10472 10473 // See if either argument is a literal zero. 10474 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10475 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10476 if (!MTE) return false; 10477 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10478 if (!Num) return false; 10479 if (Num->getValue() != 0) return false; 10480 return true; 10481 }; 10482 10483 const Expr *FirstArg = Call->getArg(0); 10484 const Expr *SecondArg = Call->getArg(1); 10485 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10486 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10487 10488 // Only warn when exactly one argument is zero. 10489 if (IsFirstArgZero == IsSecondArgZero) return; 10490 10491 SourceRange FirstRange = FirstArg->getSourceRange(); 10492 SourceRange SecondRange = SecondArg->getSourceRange(); 10493 10494 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10495 10496 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10497 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10498 10499 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10500 SourceRange RemovalRange; 10501 if (IsFirstArgZero) { 10502 RemovalRange = SourceRange(FirstRange.getBegin(), 10503 SecondRange.getBegin().getLocWithOffset(-1)); 10504 } else { 10505 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10506 SecondRange.getEnd()); 10507 } 10508 10509 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10510 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10511 << FixItHint::CreateRemoval(RemovalRange); 10512 } 10513 10514 //===--- CHECK: Standard memory functions ---------------------------------===// 10515 10516 /// Takes the expression passed to the size_t parameter of functions 10517 /// such as memcmp, strncat, etc and warns if it's a comparison. 10518 /// 10519 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
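/// The warning is paired with notes whose fix-its either move the misplaced
/// parenthesis, yielding `if (memcmp(&a, &b, sizeof(a)) > 0)`, or cast the
/// whole comparison to `(size_t)` to deliberately silence the warning.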
10520 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10521 IdentifierInfo *FnName, 10522 SourceLocation FnLoc, 10523 SourceLocation RParenLoc) { 10524 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10525 if (!Size) 10526 return false; 10527 10528 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10529 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10530 return false; 10531 10532 SourceRange SizeRange = Size->getSourceRange(); 10533 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10534 << SizeRange << FnName; 10535 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10536 << FnName 10537 << FixItHint::CreateInsertion( 10538 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10539 << FixItHint::CreateRemoval(RParenLoc); 10540 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10541 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10542 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10543 ")"); 10544 10545 return true; 10546 } 10547 10548 /// Determine whether the given type is or contains a dynamic class type 10549 /// (e.g., whether it has a vtable). 10550 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10551 bool &IsContained) { 10552 // Look through array types while ignoring qualifiers. 10553 const Type *Ty = T->getBaseElementTypeUnsafe(); 10554 IsContained = false; 10555 10556 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10557 RD = RD ? RD->getDefinition() : nullptr; 10558 if (!RD || RD->isInvalidDecl()) 10559 return nullptr; 10560 10561 if (RD->isDynamicClass()) 10562 return RD; 10563 10564 // Check all the fields. If any bases were dynamic, the class is dynamic. 10565 // It's impossible for a class to transitively contain itself by value, so 10566 // infinite recursion is impossible. 10567 for (auto *FD : RD->fields()) { 10568 bool SubContained; 10569 if (const CXXRecordDecl *ContainedRD = 10570 getContainedDynamicClass(FD->getType(), SubContained)) { 10571 IsContained = true; 10572 return ContainedRD; 10573 } 10574 } 10575 10576 return nullptr; 10577 } 10578 10579 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 10580 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 10581 if (Unary->getKind() == UETT_SizeOf) 10582 return Unary; 10583 return nullptr; 10584 } 10585 10586 /// If E is a sizeof expression, returns its argument expression, 10587 /// otherwise returns NULL. 10588 static const Expr *getSizeOfExprArg(const Expr *E) { 10589 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10590 if (!SizeOf->isArgumentType()) 10591 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 10592 return nullptr; 10593 } 10594 10595 /// If E is a sizeof expression, returns its argument type. 
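/// For example (an illustrative sketch, not from the original comments):
/// \code
///   char buf[16];
///   // getSizeOfArgType(E) is 'char[16]' when E is 'sizeof(buf)', 'int' when
///   // E is 'sizeof(int)', and a null QualType for any other expression.
/// \endcode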
10596 static QualType getSizeOfArgType(const Expr *E) { 10597 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10598 return SizeOf->getTypeOfArgument(); 10599 return QualType(); 10600 } 10601 10602 namespace { 10603 10604 struct SearchNonTrivialToInitializeField 10605 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 10606 using Super = 10607 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 10608 10609 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 10610 10611 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 10612 SourceLocation SL) { 10613 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10614 asDerived().visitArray(PDIK, AT, SL); 10615 return; 10616 } 10617 10618 Super::visitWithKind(PDIK, FT, SL); 10619 } 10620 10621 void visitARCStrong(QualType FT, SourceLocation SL) { 10622 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10623 } 10624 void visitARCWeak(QualType FT, SourceLocation SL) { 10625 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10626 } 10627 void visitStruct(QualType FT, SourceLocation SL) { 10628 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10629 visit(FD->getType(), FD->getLocation()); 10630 } 10631 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 10632 const ArrayType *AT, SourceLocation SL) { 10633 visit(getContext().getBaseElementType(AT), SL); 10634 } 10635 void visitTrivial(QualType FT, SourceLocation SL) {} 10636 10637 static void diag(QualType RT, const Expr *E, Sema &S) { 10638 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 10639 } 10640 10641 ASTContext &getContext() { return S.getASTContext(); } 10642 10643 const Expr *E; 10644 Sema &S; 10645 }; 10646 10647 struct SearchNonTrivialToCopyField 10648 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 10649 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 10650 10651 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 10652 10653 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 10654 SourceLocation SL) { 10655 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10656 asDerived().visitArray(PCK, AT, SL); 10657 return; 10658 } 10659 10660 Super::visitWithKind(PCK, FT, SL); 10661 } 10662 10663 void visitARCStrong(QualType FT, SourceLocation SL) { 10664 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10665 } 10666 void visitARCWeak(QualType FT, SourceLocation SL) { 10667 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10668 } 10669 void visitStruct(QualType FT, SourceLocation SL) { 10670 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10671 visit(FD->getType(), FD->getLocation()); 10672 } 10673 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 10674 SourceLocation SL) { 10675 visit(getContext().getBaseElementType(AT), SL); 10676 } 10677 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 10678 SourceLocation SL) {} 10679 void visitTrivial(QualType FT, SourceLocation SL) {} 10680 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 10681 10682 static void diag(QualType RT, const Expr *E, Sema &S) { 10683 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 10684 } 10685 10686 ASTContext &getContext() { return S.getASTContext(); } 10687 10688 const Expr *E; 10689 Sema &S; 10690 
}; 10691 10692 } 10693 10694 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 10695 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 10696 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 10697 10698 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 10699 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 10700 return false; 10701 10702 return doesExprLikelyComputeSize(BO->getLHS()) || 10703 doesExprLikelyComputeSize(BO->getRHS()); 10704 } 10705 10706 return getAsSizeOfExpr(SizeofExpr) != nullptr; 10707 } 10708 10709 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 10710 /// 10711 /// \code 10712 /// #define MACRO 0 10713 /// foo(MACRO); 10714 /// foo(0); 10715 /// \endcode 10716 /// 10717 /// This should return true for the first call to foo, but not for the second 10718 /// (regardless of whether foo is a macro or function). 10719 static bool isArgumentExpandedFromMacro(SourceManager &SM, 10720 SourceLocation CallLoc, 10721 SourceLocation ArgLoc) { 10722 if (!CallLoc.isMacroID()) 10723 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 10724 10725 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 10726 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 10727 } 10728 10729 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10730 /// last two arguments transposed. 10731 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10732 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10733 return; 10734 10735 const Expr *SizeArg = 10736 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10737 10738 auto isLiteralZero = [](const Expr *E) { 10739 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10740 }; 10741 10742 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 10743 SourceLocation CallLoc = Call->getRParenLoc(); 10744 SourceManager &SM = S.getSourceManager(); 10745 if (isLiteralZero(SizeArg) && 10746 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10747 10748 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10749 10750 // Some platforms #define bzero to __builtin_memset. See if this is the 10751 // case, and if so, emit a better diagnostic. 10752 if (BId == Builtin::BIbzero || 10753 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10754 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10755 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10756 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10757 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10758 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10759 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10760 } 10761 return; 10762 } 10763 10764 // If the second argument to a memset is a sizeof expression and the third 10765 // isn't, this is also likely an error. This should catch 10766 // 'memset(buf, sizeof(buf), 0xff)'. 10767 if (BId == Builtin::BImemset && 10768 doesExprLikelyComputeSize(Call->getArg(1)) && 10769 !doesExprLikelyComputeSize(Call->getArg(2))) { 10770 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10771 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10772 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10773 return; 10774 } 10775 } 10776 10777 /// Check for dangerous or invalid arguments to memset(). 
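/// A typical instance (illustrative, not from the original source):
/// \code
///   char *p = buf;
///   memset(p, 0, sizeof(p)); // clears only sizeof(char *) bytes, not the buffer
/// \endcode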
10778 /// 10779 /// This issues warnings on known problematic, dangerous or unspecified 10780 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10781 /// function calls. 10782 /// 10783 /// \param Call The call expression to diagnose. 10784 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10785 unsigned BId, 10786 IdentifierInfo *FnName) { 10787 assert(BId != 0); 10788 10789 // It is possible to have a non-standard definition of memset. Validate 10790 // we have enough arguments, and if not, abort further checking. 10791 unsigned ExpectedNumArgs = 10792 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10793 if (Call->getNumArgs() < ExpectedNumArgs) 10794 return; 10795 10796 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10797 BId == Builtin::BIstrndup ? 1 : 2); 10798 unsigned LenArg = 10799 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10800 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10801 10802 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10803 Call->getBeginLoc(), Call->getRParenLoc())) 10804 return; 10805 10806 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10807 CheckMemaccessSize(*this, BId, Call); 10808 10809 // We have special checking when the length is a sizeof expression. 10810 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10811 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10812 llvm::FoldingSetNodeID SizeOfArgID; 10813 10814 // Although widely used, 'bzero' is not a standard function. Be more strict 10815 // with the argument types before allowing diagnostics and only allow the 10816 // form bzero(ptr, sizeof(...)). 10817 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10818 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10819 return; 10820 10821 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10822 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10823 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10824 10825 QualType DestTy = Dest->getType(); 10826 QualType PointeeTy; 10827 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10828 PointeeTy = DestPtrTy->getPointeeType(); 10829 10830 // Never warn about void type pointers. This can be used to suppress 10831 // false positives. 10832 if (PointeeTy->isVoidType()) 10833 continue; 10834 10835 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10836 // actually comparing the expressions for equality. Because computing the 10837 // expression IDs can be expensive, we only do this if the diagnostic is 10838 // enabled. 10839 if (SizeOfArg && 10840 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10841 SizeOfArg->getExprLoc())) { 10842 // We only compute IDs for expressions if the warning is enabled, and 10843 // cache the sizeof arg's ID. 10844 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10845 SizeOfArg->Profile(SizeOfArgID, Context, true); 10846 llvm::FoldingSetNodeID DestID; 10847 Dest->Profile(DestID, Context, true); 10848 if (DestID == SizeOfArgID) { 10849 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10850 // over sizeof(src) as well. 10851 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 
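// Illustrative sketch (hypothetical code, not from this file) of the three
// fix-it flavors selected below:
//   memset(p, 0, sizeof(p));    // ActionIdx 0: suggest dereferencing, sizeof(*p)
//   memset(&x, 0, sizeof(&x));  // ActionIdx 1: suggest dropping the '&'
//   memset(s, 0, sizeof(s));    // 'char *s' -> ActionIdx 2: suggest an explicit length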
10852 StringRef ReadableName = FnName->getName(); 10853 10854 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10855 if (UnaryOp->getOpcode() == UO_AddrOf) 10856 ActionIdx = 1; // If its an address-of operator, just remove it. 10857 if (!PointeeTy->isIncompleteType() && 10858 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10859 ActionIdx = 2; // If the pointee's size is sizeof(char), 10860 // suggest an explicit length. 10861 10862 // If the function is defined as a builtin macro, do not show macro 10863 // expansion. 10864 SourceLocation SL = SizeOfArg->getExprLoc(); 10865 SourceRange DSR = Dest->getSourceRange(); 10866 SourceRange SSR = SizeOfArg->getSourceRange(); 10867 SourceManager &SM = getSourceManager(); 10868 10869 if (SM.isMacroArgExpansion(SL)) { 10870 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10871 SL = SM.getSpellingLoc(SL); 10872 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10873 SM.getSpellingLoc(DSR.getEnd())); 10874 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10875 SM.getSpellingLoc(SSR.getEnd())); 10876 } 10877 10878 DiagRuntimeBehavior(SL, SizeOfArg, 10879 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10880 << ReadableName 10881 << PointeeTy 10882 << DestTy 10883 << DSR 10884 << SSR); 10885 DiagRuntimeBehavior(SL, SizeOfArg, 10886 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10887 << ActionIdx 10888 << SSR); 10889 10890 break; 10891 } 10892 } 10893 10894 // Also check for cases where the sizeof argument is the exact same 10895 // type as the memory argument, and where it points to a user-defined 10896 // record type. 10897 if (SizeOfArgTy != QualType()) { 10898 if (PointeeTy->isRecordType() && 10899 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10900 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10901 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10902 << FnName << SizeOfArgTy << ArgIdx 10903 << PointeeTy << Dest->getSourceRange() 10904 << LenExpr->getSourceRange()); 10905 break; 10906 } 10907 } 10908 } else if (DestTy->isArrayType()) { 10909 PointeeTy = DestTy; 10910 } 10911 10912 if (PointeeTy == QualType()) 10913 continue; 10914 10915 // Always complain about dynamic classes. 10916 bool IsContained; 10917 if (const CXXRecordDecl *ContainedRD = 10918 getContainedDynamicClass(PointeeTy, IsContained)) { 10919 10920 unsigned OperationType = 0; 10921 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10922 // "overwritten" if we're warning about the destination for any call 10923 // but memcmp; otherwise a verb appropriate to the call. 10924 if (ArgIdx != 0 || IsCmp) { 10925 if (BId == Builtin::BImemcpy) 10926 OperationType = 1; 10927 else if(BId == Builtin::BImemmove) 10928 OperationType = 2; 10929 else if (IsCmp) 10930 OperationType = 3; 10931 } 10932 10933 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10934 PDiag(diag::warn_dyn_class_memaccess) 10935 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 10936 << IsContained << ContainedRD << OperationType 10937 << Call->getCallee()->getSourceRange()); 10938 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10939 BId != Builtin::BImemset) 10940 DiagRuntimeBehavior( 10941 Dest->getExprLoc(), Dest, 10942 PDiag(diag::warn_arc_object_memaccess) 10943 << ArgIdx << FnName << PointeeTy 10944 << Call->getCallee()->getSourceRange()); 10945 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10946 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10947 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10948 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10949 PDiag(diag::warn_cstruct_memaccess) 10950 << ArgIdx << FnName << PointeeTy << 0); 10951 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10952 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10953 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10954 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10955 PDiag(diag::warn_cstruct_memaccess) 10956 << ArgIdx << FnName << PointeeTy << 1); 10957 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10958 } else { 10959 continue; 10960 } 10961 } else 10962 continue; 10963 10964 DiagRuntimeBehavior( 10965 Dest->getExprLoc(), Dest, 10966 PDiag(diag::note_bad_memaccess_silence) 10967 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10968 break; 10969 } 10970 } 10971 10972 // A little helper routine: ignore addition and subtraction of integer literals. 10973 // This intentionally does not ignore all integer constant expressions because 10974 // we don't want to remove sizeof(). 10975 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 10976 Ex = Ex->IgnoreParenCasts(); 10977 10978 while (true) { 10979 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 10980 if (!BO || !BO->isAdditiveOp()) 10981 break; 10982 10983 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 10984 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 10985 10986 if (isa<IntegerLiteral>(RHS)) 10987 Ex = LHS; 10988 else if (isa<IntegerLiteral>(LHS)) 10989 Ex = RHS; 10990 else 10991 break; 10992 } 10993 10994 return Ex; 10995 } 10996 10997 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 10998 ASTContext &Context) { 10999 // Only handle constant-sized or VLAs, but not flexible members. 11000 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11001 // Only issue the FIXIT for arrays of size > 1. 11002 if (CAT->getSize().getSExtValue() <= 1) 11003 return false; 11004 } else if (!Ty->isVariableArrayType()) { 11005 return false; 11006 } 11007 return true; 11008 } 11009 11010 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11011 // be the size of the source, instead of the destination. 
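// For example (an illustrative sketch, not from the original comments):
//   char dst[32];
//   strlcpy(dst, src, sizeof(src)); // warned: size taken from the source
//   strlcpy(dst, src, sizeof(dst)); // what the fix-it below suggests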
11012 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11013 IdentifierInfo *FnName) { 11014 11015 // Don't crash if the user has the wrong number of arguments 11016 unsigned NumArgs = Call->getNumArgs(); 11017 if ((NumArgs != 3) && (NumArgs != 4)) 11018 return; 11019 11020 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11021 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11022 const Expr *CompareWithSrc = nullptr; 11023 11024 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11025 Call->getBeginLoc(), Call->getRParenLoc())) 11026 return; 11027 11028 // Look for 'strlcpy(dst, x, sizeof(x))' 11029 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11030 CompareWithSrc = Ex; 11031 else { 11032 // Look for 'strlcpy(dst, x, strlen(x))' 11033 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11034 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11035 SizeCall->getNumArgs() == 1) 11036 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11037 } 11038 } 11039 11040 if (!CompareWithSrc) 11041 return; 11042 11043 // Determine if the argument to sizeof/strlen is equal to the source 11044 // argument. In principle there's all kinds of things you could do 11045 // here, for instance creating an == expression and evaluating it with 11046 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11047 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11048 if (!SrcArgDRE) 11049 return; 11050 11051 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11052 if (!CompareWithSrcDRE || 11053 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11054 return; 11055 11056 const Expr *OriginalSizeArg = Call->getArg(2); 11057 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11058 << OriginalSizeArg->getSourceRange() << FnName; 11059 11060 // Output a FIXIT hint if the destination is an array (rather than a 11061 // pointer to an array). This could be enhanced to handle some 11062 // pointers if we know the actual size, like if DstArg is 'array+2' 11063 // we could say 'sizeof(array)-2'. 11064 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11065 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11066 return; 11067 11068 SmallString<128> sizeString; 11069 llvm::raw_svector_ostream OS(sizeString); 11070 OS << "sizeof("; 11071 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11072 OS << ")"; 11073 11074 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11075 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11076 OS.str()); 11077 } 11078 11079 /// Check if two expressions refer to the same declaration. 11080 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11081 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11082 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11083 return D1->getDecl() == D2->getDecl(); 11084 return false; 11085 } 11086 11087 static const Expr *getStrlenExprArg(const Expr *E) { 11088 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11089 const FunctionDecl *FD = CE->getDirectCallee(); 11090 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11091 return nullptr; 11092 return CE->getArg(0)->IgnoreParenCasts(); 11093 } 11094 return nullptr; 11095 } 11096 11097 // Warn on anti-patterns as the 'size' argument to strncat. 
11098 // The correct size argument should look like the following:
11099 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
11100 void Sema::CheckStrncatArguments(const CallExpr *CE,
11101 IdentifierInfo *FnName) {
11102 // Don't crash if the user has the wrong number of arguments.
11103 if (CE->getNumArgs() < 3)
11104 return;
11105 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
11106 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
11107 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
11108
11109 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
11110 CE->getRParenLoc()))
11111 return;
11112
11113 // Identify common expressions that are wrongly used as the size argument
11114 // to strncat and may lead to buffer overflows.
11115 unsigned PatternType = 0;
11116 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
11117 // - sizeof(dst)
11118 if (referToTheSameDecl(SizeOfArg, DstArg))
11119 PatternType = 1;
11120 // - sizeof(src)
11121 else if (referToTheSameDecl(SizeOfArg, SrcArg))
11122 PatternType = 2;
11123 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
11124 if (BE->getOpcode() == BO_Sub) {
11125 const Expr *L = BE->getLHS()->IgnoreParenCasts();
11126 const Expr *R = BE->getRHS()->IgnoreParenCasts();
11127 // - sizeof(dst) - strlen(dst)
11128 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
11129 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
11130 PatternType = 1;
11131 // - sizeof(src) - (anything)
11132 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
11133 PatternType = 2;
11134 }
11135 }
11136
11137 if (PatternType == 0)
11138 return;
11139
11140 // Generate the diagnostic.
11141 SourceLocation SL = LenArg->getBeginLoc();
11142 SourceRange SR = LenArg->getSourceRange();
11143 SourceManager &SM = getSourceManager();
11144
11145 // If the function is defined as a builtin macro, do not show macro expansion.
11146 if (SM.isMacroArgExpansion(SL)) {
11147 SL = SM.getSpellingLoc(SL);
11148 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
11149 SM.getSpellingLoc(SR.getEnd()));
11150 }
11151
11152 // Check if the destination is an array (rather than a pointer to an array).
11153 QualType DstTy = DstArg->getType(); 11154 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11155 Context); 11156 if (!isKnownSizeArray) { 11157 if (PatternType == 1) 11158 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11159 else 11160 Diag(SL, diag::warn_strncat_src_size) << SR; 11161 return; 11162 } 11163 11164 if (PatternType == 1) 11165 Diag(SL, diag::warn_strncat_large_size) << SR; 11166 else 11167 Diag(SL, diag::warn_strncat_src_size) << SR; 11168 11169 SmallString<128> sizeString; 11170 llvm::raw_svector_ostream OS(sizeString); 11171 OS << "sizeof("; 11172 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11173 OS << ") - "; 11174 OS << "strlen("; 11175 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11176 OS << ") - 1"; 11177 11178 Diag(SL, diag::note_strncat_wrong_size) 11179 << FixItHint::CreateReplacement(SR, OS.str()); 11180 } 11181 11182 namespace { 11183 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11184 const UnaryOperator *UnaryExpr, const Decl *D) { 11185 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11186 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11187 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11188 return; 11189 } 11190 } 11191 11192 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11193 const UnaryOperator *UnaryExpr) { 11194 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11195 const Decl *D = Lvalue->getDecl(); 11196 if (isa<DeclaratorDecl>(D)) 11197 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11198 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11199 } 11200 11201 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11202 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11203 Lvalue->getMemberDecl()); 11204 } 11205 11206 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11207 const UnaryOperator *UnaryExpr) { 11208 const auto *Lambda = dyn_cast<LambdaExpr>( 11209 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11210 if (!Lambda) 11211 return; 11212 11213 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11214 << CalleeName << 2 /*object: lambda expression*/; 11215 } 11216 11217 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11218 const DeclRefExpr *Lvalue) { 11219 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11220 if (Var == nullptr) 11221 return; 11222 11223 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11224 << CalleeName << 0 /*object: */ << Var; 11225 } 11226 11227 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11228 const CastExpr *Cast) { 11229 SmallString<128> SizeString; 11230 llvm::raw_svector_ostream OS(SizeString); 11231 11232 clang::CastKind Kind = Cast->getCastKind(); 11233 if (Kind == clang::CK_BitCast && 11234 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11235 return; 11236 if (Kind == clang::CK_IntegralToPointer && 11237 !isa<IntegerLiteral>( 11238 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11239 return; 11240 11241 switch (Cast->getCastKind()) { 11242 case clang::CK_BitCast: 11243 case clang::CK_IntegralToPointer: 11244 case clang::CK_FunctionToPointerDecay: 11245 OS << '\''; 11246 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11247 OS << '\''; 11248 break; 11249 default: 11250 return; 11251 } 11252 11253 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11254 << CalleeName << 0 /*object: */ << OS.str(); 11255 } 11256 } // namespace 11257 11258 /// Alerts the user that they are attempting to free a non-malloc'd object. 11259 void Sema::CheckFreeArguments(const CallExpr *E) { 11260 const std::string CalleeName = 11261 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11262 11263 { // Prefer something that doesn't involve a cast to make things simpler. 11264 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11265 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11266 switch (UnaryExpr->getOpcode()) { 11267 case UnaryOperator::Opcode::UO_AddrOf: 11268 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11269 case UnaryOperator::Opcode::UO_Plus: 11270 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11271 default: 11272 break; 11273 } 11274 11275 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11276 if (Lvalue->getType()->isArrayType()) 11277 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11278 11279 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11280 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11281 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11282 return; 11283 } 11284 11285 if (isa<BlockExpr>(Arg)) { 11286 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11287 << CalleeName << 1 /*object: block*/; 11288 return; 11289 } 11290 } 11291 // Maybe the cast was important, check after the other cases. 11292 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11293 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11294 } 11295 11296 void 11297 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11298 SourceLocation ReturnLoc, 11299 bool isObjCMethod, 11300 const AttrVec *Attrs, 11301 const FunctionDecl *FD) { 11302 // Check if the return value is null but should not be. 11303 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11304 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11305 CheckNonNullExpr(*this, RetValExp)) 11306 Diag(ReturnLoc, diag::warn_null_ret) 11307 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11308 11309 // C++11 [basic.stc.dynamic.allocation]p4: 11310 // If an allocation function declared with a non-throwing 11311 // exception-specification fails to allocate storage, it shall return 11312 // a null pointer. Any other allocation function that fails to allocate 11313 // storage shall indicate failure only by throwing an exception [...] 11314 if (FD) { 11315 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11316 if (Op == OO_New || Op == OO_Array_New) { 11317 const FunctionProtoType *Proto 11318 = FD->getType()->castAs<FunctionProtoType>(); 11319 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11320 CheckNonNullExpr(*this, RetValExp)) 11321 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11322 << FD << getLangOpts().CPlusPlus11; 11323 } 11324 } 11325 11326 // PPC MMA non-pointer types are not allowed as return type. Checking the type 11327 // here prevent the user from using a PPC MMA type as trailing return type. 11328 if (Context.getTargetInfo().getTriple().isPPC64()) 11329 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11330 } 11331 11332 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 11333 11334 /// Check for comparisons of floating point operands using != and ==. 
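/// (An illustrative case: 'if (d == 0.3)' for a 'double' d, which rarely holds
/// because 0.3 has no exact binary floating-point representation.)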
11335 /// Issue a warning if these are not self-comparisons, as they are not likely
11336 /// to do what the programmer intended.
11337 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
11338 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
11339 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
11340
11341 // Special case: check for x == x (which is OK).
11342 // Do not emit warnings for such cases.
11343 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
11344 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
11345 if (DRL->getDecl() == DRR->getDecl())
11346 return;
11347
11348 // Special case: check for comparisons against literals that can be exactly
11349 // represented by APFloat. In such cases, do not emit a warning. This
11350 // is a heuristic: often comparisons against such literals are used to
11351 // detect if a value in a variable has not changed. This clearly can
11352 // lead to false negatives.
11353 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
11354 if (FLL->isExact())
11355 return;
11356 } else
11357 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
11358 if (FLR->isExact())
11359 return;
11360
11361 // Check for comparisons with builtin types.
11362 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
11363 if (CL->getBuiltinCallee())
11364 return;
11365
11366 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
11367 if (CR->getBuiltinCallee())
11368 return;
11369
11370 // Emit the diagnostic.
11371 Diag(Loc, diag::warn_floatingpoint_eq)
11372 << LHS->getSourceRange() << RHS->getSourceRange();
11373 }
11374
11375 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
11376 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
11377
11378 namespace {
11379
11380 /// Structure recording the 'active' range of an integer-valued
11381 /// expression.
11382 struct IntRange {
11383 /// The number of bits active in the int. Note that this includes exactly one
11384 /// sign bit if !NonNegative.
11385 unsigned Width;
11386
11387 /// True if the int is known not to have negative values. If so, all leading
11388 /// bits before Width are known zero; otherwise they are known to be the
11389 /// same as the MSB within Width.
11390 bool NonNegative;
11391
11392 IntRange(unsigned Width, bool NonNegative)
11393 : Width(Width), NonNegative(NonNegative) {}
11394
11395 /// Number of bits excluding the sign bit.
11396 unsigned valueBits() const {
11397 return NonNegative ? Width : Width - 1;
11398 }
11399
11400 /// Returns the range of the bool type.
11401 static IntRange forBoolType() {
11402 return IntRange(1, true);
11403 }
11404
11405 /// Returns the range of an opaque value of the given integral type.
11406 static IntRange forValueOfType(ASTContext &C, QualType T) {
11407 return forValueOfCanonicalType(C,
11408 T->getCanonicalTypeInternal().getTypePtr());
11409 }
11410
11411 /// Returns the range of an opaque value of a canonical integral type.
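/// For example (illustrative, not from the original comments), for a C++
/// enumeration with no fixed underlying type:
/// \code
///   enum E { A = 0, B = 5 };
/// \endcode
/// the result is IntRange(3, /*NonNegative=*/true): three bits cover every
/// enumerator and none of them is negative.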
11412 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11413 assert(T->isCanonicalUnqualified()); 11414 11415 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11416 T = VT->getElementType().getTypePtr(); 11417 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11418 T = CT->getElementType().getTypePtr(); 11419 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11420 T = AT->getValueType().getTypePtr(); 11421 11422 if (!C.getLangOpts().CPlusPlus) { 11423 // For enum types in C code, use the underlying datatype. 11424 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11425 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11426 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11427 // For enum types in C++, use the known bit width of the enumerators. 11428 EnumDecl *Enum = ET->getDecl(); 11429 // In C++11, enums can have a fixed underlying type. Use this type to 11430 // compute the range. 11431 if (Enum->isFixed()) { 11432 return IntRange(C.getIntWidth(QualType(T, 0)), 11433 !ET->isSignedIntegerOrEnumerationType()); 11434 } 11435 11436 unsigned NumPositive = Enum->getNumPositiveBits(); 11437 unsigned NumNegative = Enum->getNumNegativeBits(); 11438 11439 if (NumNegative == 0) 11440 return IntRange(NumPositive, true/*NonNegative*/); 11441 else 11442 return IntRange(std::max(NumPositive + 1, NumNegative), 11443 false/*NonNegative*/); 11444 } 11445 11446 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11447 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11448 11449 const BuiltinType *BT = cast<BuiltinType>(T); 11450 assert(BT->isInteger()); 11451 11452 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11453 } 11454 11455 /// Returns the "target" range of a canonical integral type, i.e. 11456 /// the range of values expressible in the type. 11457 /// 11458 /// This matches forValueOfCanonicalType except that enums have the 11459 /// full range of their type, not the range of their enumerators. 11460 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11461 assert(T->isCanonicalUnqualified()); 11462 11463 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11464 T = VT->getElementType().getTypePtr(); 11465 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11466 T = CT->getElementType().getTypePtr(); 11467 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11468 T = AT->getValueType().getTypePtr(); 11469 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11470 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11471 11472 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11473 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11474 11475 const BuiltinType *BT = cast<BuiltinType>(T); 11476 assert(BT->isInteger()); 11477 11478 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11479 } 11480 11481 /// Returns the supremum of two ranges: i.e. their conservative merge. 11482 static IntRange join(IntRange L, IntRange R) { 11483 bool Unsigned = L.NonNegative && R.NonNegative; 11484 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11485 L.NonNegative && R.NonNegative); 11486 } 11487 11488 /// Return the range of a bitwise-AND of the two ranges. 
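/// For instance (illustrative), AND-ing a 32-bit possibly-negative range with
/// an 8-bit non-negative range yields an 8-bit non-negative range, since the
/// non-negative operand zeroes every higher bit.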
11489 static IntRange bit_and(IntRange L, IntRange R) { 11490 unsigned Bits = std::max(L.Width, R.Width); 11491 bool NonNegative = false; 11492 if (L.NonNegative) { 11493 Bits = std::min(Bits, L.Width); 11494 NonNegative = true; 11495 } 11496 if (R.NonNegative) { 11497 Bits = std::min(Bits, R.Width); 11498 NonNegative = true; 11499 } 11500 return IntRange(Bits, NonNegative); 11501 } 11502 11503 /// Return the range of a sum of the two ranges. 11504 static IntRange sum(IntRange L, IntRange R) { 11505 bool Unsigned = L.NonNegative && R.NonNegative; 11506 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11507 Unsigned); 11508 } 11509 11510 /// Return the range of a difference of the two ranges. 11511 static IntRange difference(IntRange L, IntRange R) { 11512 // We need a 1-bit-wider range if: 11513 // 1) LHS can be negative: least value can be reduced. 11514 // 2) RHS can be negative: greatest value can be increased. 11515 bool CanWiden = !L.NonNegative || !R.NonNegative; 11516 bool Unsigned = L.NonNegative && R.Width == 0; 11517 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11518 !Unsigned, 11519 Unsigned); 11520 } 11521 11522 /// Return the range of a product of the two ranges. 11523 static IntRange product(IntRange L, IntRange R) { 11524 // If both LHS and RHS can be negative, we can form 11525 // -2^L * -2^R = 2^(L + R) 11526 // which requires L + R + 1 value bits to represent. 11527 bool CanWiden = !L.NonNegative && !R.NonNegative; 11528 bool Unsigned = L.NonNegative && R.NonNegative; 11529 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 11530 Unsigned); 11531 } 11532 11533 /// Return the range of a remainder operation between the two ranges. 11534 static IntRange rem(IntRange L, IntRange R) { 11535 // The result of a remainder can't be larger than the result of 11536 // either side. The sign of the result is the sign of the LHS. 11537 bool Unsigned = L.NonNegative; 11538 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 11539 Unsigned); 11540 } 11541 }; 11542 11543 } // namespace 11544 11545 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 11546 unsigned MaxWidth) { 11547 if (value.isSigned() && value.isNegative()) 11548 return IntRange(value.getMinSignedBits(), false); 11549 11550 if (value.getBitWidth() > MaxWidth) 11551 value = value.trunc(MaxWidth); 11552 11553 // isNonNegative() just checks the sign bit without considering 11554 // signedness. 11555 return IntRange(value.getActiveBits(), true); 11556 } 11557 11558 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 11559 unsigned MaxWidth) { 11560 if (result.isInt()) 11561 return GetValueRange(C, result.getInt(), MaxWidth); 11562 11563 if (result.isVector()) { 11564 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 11565 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 11566 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 11567 R = IntRange::join(R, El); 11568 } 11569 return R; 11570 } 11571 11572 if (result.isComplexInt()) { 11573 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 11574 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 11575 return IntRange::join(R, I); 11576 } 11577 11578 // This can happen with lossless casts to intptr_t of "based" lvalues. 11579 // Assume it might use arbitrary bits. 
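// (An illustrative case: '(intptr_t)&some_global' folds to an lvalue-based
// APValue rather than to a plain integer.)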
11580 // FIXME: The only reason we need to pass the type in here is to get 11581 // the sign right on this one case. It would be nice if APValue 11582 // preserved this. 11583 assert(result.isLValue() || result.isAddrLabelDiff()); 11584 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 11585 } 11586 11587 static QualType GetExprType(const Expr *E) { 11588 QualType Ty = E->getType(); 11589 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 11590 Ty = AtomicRHS->getValueType(); 11591 return Ty; 11592 } 11593 11594 /// Pseudo-evaluate the given integer expression, estimating the 11595 /// range of values it might take. 11596 /// 11597 /// \param MaxWidth The width to which the value will be truncated. 11598 /// \param Approximate If \c true, return a likely range for the result: in 11599 /// particular, assume that arithmetic on narrower types doesn't leave 11600 /// those types. If \c false, return a range including all possible 11601 /// result values. 11602 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 11603 bool InConstantContext, bool Approximate) { 11604 E = E->IgnoreParens(); 11605 11606 // Try a full evaluation first. 11607 Expr::EvalResult result; 11608 if (E->EvaluateAsRValue(result, C, InConstantContext)) 11609 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 11610 11611 // I think we only want to look through implicit casts here; if the 11612 // user has an explicit widening cast, we should treat the value as 11613 // being of the new, wider type. 11614 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 11615 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 11616 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 11617 Approximate); 11618 11619 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 11620 11621 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 11622 CE->getCastKind() == CK_BooleanToSignedIntegral; 11623 11624 // Assume that non-integer casts can span the full range of the type. 11625 if (!isIntegerCast) 11626 return OutputTypeRange; 11627 11628 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 11629 std::min(MaxWidth, OutputTypeRange.Width), 11630 InConstantContext, Approximate); 11631 11632 // Bail out if the subexpr's range is as wide as the cast type. 11633 if (SubRange.Width >= OutputTypeRange.Width) 11634 return OutputTypeRange; 11635 11636 // Otherwise, we take the smaller width, and we're non-negative if 11637 // either the output type or the subexpr is. 11638 return IntRange(SubRange.Width, 11639 SubRange.NonNegative || OutputTypeRange.NonNegative); 11640 } 11641 11642 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11643 // If we can fold the condition, just take that operand. 11644 bool CondResult; 11645 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 11646 return GetExprRange(C, 11647 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 11648 MaxWidth, InConstantContext, Approximate); 11649 11650 // Otherwise, conservatively merge. 11651 // GetExprRange requires an integer expression, but a throw expression 11652 // results in a void type. 11653 Expr *E = CO->getTrueExpr(); 11654 IntRange L = E->getType()->isVoidType() 11655 ? IntRange{0, true} 11656 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11657 E = CO->getFalseExpr(); 11658 IntRange R = E->getType()->isVoidType() 11659 ? 
IntRange{0, true}
11660 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
11661 return IntRange::join(L, R);
11662 }
11663
11664 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
11665 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
11666
11667 switch (BO->getOpcode()) {
11668 case BO_Cmp:
11669 llvm_unreachable("builtin <=> should have class type");
11670
11671 // Boolean-valued operations are single-bit and positive.
11672 case BO_LAnd:
11673 case BO_LOr:
11674 case BO_LT:
11675 case BO_GT:
11676 case BO_LE:
11677 case BO_GE:
11678 case BO_EQ:
11679 case BO_NE:
11680 return IntRange::forBoolType();
11681
11682 // The type of the assignments is the type of the LHS, so the RHS
11683 // is not necessarily the same type.
11684 case BO_MulAssign:
11685 case BO_DivAssign:
11686 case BO_RemAssign:
11687 case BO_AddAssign:
11688 case BO_SubAssign:
11689 case BO_XorAssign:
11690 case BO_OrAssign:
11691 // TODO: bitfields?
11692 return IntRange::forValueOfType(C, GetExprType(E));
11693
11694 // Simple assignments just pass through the RHS, which will have
11695 // been coerced to the LHS type.
11696 case BO_Assign:
11697 // TODO: bitfields?
11698 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
11699 Approximate);
11700
11701 // Operations with opaque sources are black-listed.
11702 case BO_PtrMemD:
11703 case BO_PtrMemI:
11704 return IntRange::forValueOfType(C, GetExprType(E));
11705
11706 // Bitwise-and uses the *infimum* of the two source ranges.
11707 case BO_And:
11708 case BO_AndAssign:
11709 Combine = IntRange::bit_and;
11710 break;
11711
11712 // Left shift gets black-listed based on a judgment call.
11713 case BO_Shl:
11714 // ...except that we want to treat '1 << (blah)' as logically
11715 // positive. It's an important idiom.
11716 if (IntegerLiteral *I
11717 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
11718 if (I->getValue() == 1) {
11719 IntRange R = IntRange::forValueOfType(C, GetExprType(E));
11720 return IntRange(R.Width, /*NonNegative*/ true);
11721 }
11722 }
11723 LLVM_FALLTHROUGH;
11724
11725 case BO_ShlAssign:
11726 return IntRange::forValueOfType(C, GetExprType(E));
11727
11728 // Right shift by a constant can narrow its left argument.
11729 case BO_Shr:
11730 case BO_ShrAssign: {
11731 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
11732 Approximate);
11733
11734 // If the shift amount is a positive constant, drop the width by
11735 // that much.
11736 if (Optional<llvm::APSInt> shift =
11737 BO->getRHS()->getIntegerConstantExpr(C)) {
11738 if (shift->isNonNegative()) {
11739 unsigned zext = shift->getZExtValue();
11740 if (zext >= L.Width)
11741 L.Width = (L.NonNegative ? 0 : 1);
11742 else
11743 L.Width -= zext;
11744 }
11745 }
11746
11747 return L;
11748 }
11749
11750 // Comma acts as its right operand.
11751 case BO_Comma:
11752 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
11753 Approximate);
11754
11755 case BO_Add:
11756 if (!Approximate)
11757 Combine = IntRange::sum;
11758 break;
11759
11760 case BO_Sub:
11761 if (BO->getLHS()->getType()->isPointerType())
11762 return IntRange::forValueOfType(C, GetExprType(E));
11763 if (!Approximate)
11764 Combine = IntRange::difference;
11765 break;
11766
11767 case BO_Mul:
11768 if (!Approximate)
11769 Combine = IntRange::product;
11770 break;
11771
11772 // The width of a division result is mostly determined by the size
11773 // of the LHS.
11774 case BO_Div: {
11775 // Don't 'pre-truncate' the operands.
11776 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11777 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11778 Approximate); 11779 11780 // If the divisor is constant, use that. 11781 if (Optional<llvm::APSInt> divisor = 11782 BO->getRHS()->getIntegerConstantExpr(C)) { 11783 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11784 if (log2 >= L.Width) 11785 L.Width = (L.NonNegative ? 0 : 1); 11786 else 11787 L.Width = std::min(L.Width - log2, MaxWidth); 11788 return L; 11789 } 11790 11791 // Otherwise, just use the LHS's width. 11792 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11793 // could be -1. 11794 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11795 Approximate); 11796 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11797 } 11798 11799 case BO_Rem: 11800 Combine = IntRange::rem; 11801 break; 11802 11803 // The default behavior is okay for these. 11804 case BO_Xor: 11805 case BO_Or: 11806 break; 11807 } 11808 11809 // Combine the two ranges, but limit the result to the type in which we 11810 // performed the computation. 11811 QualType T = GetExprType(E); 11812 unsigned opWidth = C.getIntWidth(T); 11813 IntRange L = 11814 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11815 IntRange R = 11816 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11817 IntRange C = Combine(L, R); 11818 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11819 C.Width = std::min(C.Width, MaxWidth); 11820 return C; 11821 } 11822 11823 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11824 switch (UO->getOpcode()) { 11825 // Boolean-valued operations are white-listed. 11826 case UO_LNot: 11827 return IntRange::forBoolType(); 11828 11829 // Operations with opaque sources are black-listed. 11830 case UO_Deref: 11831 case UO_AddrOf: // should be impossible 11832 return IntRange::forValueOfType(C, GetExprType(E)); 11833 11834 default: 11835 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11836 Approximate); 11837 } 11838 } 11839 11840 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11841 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11842 Approximate); 11843 11844 if (const auto *BitField = E->getSourceBitField()) 11845 return IntRange(BitField->getBitWidthValue(C), 11846 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11847 11848 return IntRange::forValueOfType(C, GetExprType(E)); 11849 } 11850 11851 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11852 bool InConstantContext, bool Approximate) { 11853 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11854 Approximate); 11855 } 11856 11857 /// Checks whether the given value, which currently has the given 11858 /// source semantics, has the same value when coerced through the 11859 /// target semantics. 11860 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11861 const llvm::fltSemantics &Src, 11862 const llvm::fltSemantics &Tgt) { 11863 llvm::APFloat truncated = value; 11864 11865 bool ignored; 11866 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11867 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11868 11869 return truncated.bitwiseIsEqual(value); 11870 } 11871 11872 /// Checks whether the given value, which currently has the given 11873 /// source semantics, has the same value when coerced through the 11874 /// target semantics. 
11875 /// 11876 /// The value might be a vector of floats (or a complex number). 11877 static bool IsSameFloatAfterCast(const APValue &value, 11878 const llvm::fltSemantics &Src, 11879 const llvm::fltSemantics &Tgt) { 11880 if (value.isFloat()) 11881 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11882 11883 if (value.isVector()) { 11884 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11885 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11886 return false; 11887 return true; 11888 } 11889 11890 assert(value.isComplexFloat()); 11891 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11892 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11893 } 11894 11895 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11896 bool IsListInit = false); 11897 11898 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11899 // Suppress cases where we are comparing against an enum constant. 11900 if (const DeclRefExpr *DR = 11901 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11902 if (isa<EnumConstantDecl>(DR->getDecl())) 11903 return true; 11904 11905 // Suppress cases where the value is expanded from a macro, unless that macro 11906 // is how a language represents a boolean literal. This is the case in both C 11907 // and Objective-C. 11908 SourceLocation BeginLoc = E->getBeginLoc(); 11909 if (BeginLoc.isMacroID()) { 11910 StringRef MacroName = Lexer::getImmediateMacroName( 11911 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11912 return MacroName != "YES" && MacroName != "NO" && 11913 MacroName != "true" && MacroName != "false"; 11914 } 11915 11916 return false; 11917 } 11918 11919 static bool isKnownToHaveUnsignedValue(Expr *E) { 11920 return E->getType()->isIntegerType() && 11921 (!E->getType()->isSignedIntegerType() || 11922 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11923 } 11924 11925 namespace { 11926 /// The promoted range of values of a type. In general this has the 11927 /// following structure: 11928 /// 11929 /// |-----------| . . . |-----------| 11930 /// ^ ^ ^ ^ 11931 /// Min HoleMin HoleMax Max 11932 /// 11933 /// ... where there is only a hole if a signed type is promoted to unsigned 11934 /// (in which case Min and Max are the smallest and largest representable 11935 /// values). 11936 struct PromotedRange { 11937 // Min, or HoleMax if there is a hole. 11938 llvm::APSInt PromotedMin; 11939 // Max, or HoleMin if there is a hole. 11940 llvm::APSInt PromotedMax; 11941 11942 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11943 if (R.Width == 0) 11944 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11945 else if (R.Width >= BitWidth && !Unsigned) { 11946 // Promotion made the type *narrower*. This happens when promoting 11947 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11948 // Treat all values of 'signed int' as being in range for now. 11949 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11950 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11951 } else { 11952 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11953 .extOrTrunc(BitWidth); 11954 PromotedMin.setIsUnsigned(Unsigned); 11955 11956 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11957 .extOrTrunc(BitWidth); 11958 PromotedMax.setIsUnsigned(Unsigned); 11959 } 11960 } 11961 11962 // Determine whether this range is contiguous (has no hole). 
11963 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11964 11965 // Where a constant value is within the range. 11966 enum ComparisonResult { 11967 LT = 0x1, 11968 LE = 0x2, 11969 GT = 0x4, 11970 GE = 0x8, 11971 EQ = 0x10, 11972 NE = 0x20, 11973 InRangeFlag = 0x40, 11974 11975 Less = LE | LT | NE, 11976 Min = LE | InRangeFlag, 11977 InRange = InRangeFlag, 11978 Max = GE | InRangeFlag, 11979 Greater = GE | GT | NE, 11980 11981 OnlyValue = LE | GE | EQ | InRangeFlag, 11982 InHole = NE 11983 }; 11984 11985 ComparisonResult compare(const llvm::APSInt &Value) const { 11986 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 11987 Value.isUnsigned() == PromotedMin.isUnsigned()); 11988 if (!isContiguous()) { 11989 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 11990 if (Value.isMinValue()) return Min; 11991 if (Value.isMaxValue()) return Max; 11992 if (Value >= PromotedMin) return InRange; 11993 if (Value <= PromotedMax) return InRange; 11994 return InHole; 11995 } 11996 11997 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 11998 case -1: return Less; 11999 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12000 case 1: 12001 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12002 case -1: return InRange; 12003 case 0: return Max; 12004 case 1: return Greater; 12005 } 12006 } 12007 12008 llvm_unreachable("impossible compare result"); 12009 } 12010 12011 static llvm::Optional<StringRef> 12012 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12013 if (Op == BO_Cmp) { 12014 ComparisonResult LTFlag = LT, GTFlag = GT; 12015 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12016 12017 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12018 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12019 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12020 return llvm::None; 12021 } 12022 12023 ComparisonResult TrueFlag, FalseFlag; 12024 if (Op == BO_EQ) { 12025 TrueFlag = EQ; 12026 FalseFlag = NE; 12027 } else if (Op == BO_NE) { 12028 TrueFlag = NE; 12029 FalseFlag = EQ; 12030 } else { 12031 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12032 TrueFlag = LT; 12033 FalseFlag = GE; 12034 } else { 12035 TrueFlag = GT; 12036 FalseFlag = LE; 12037 } 12038 if (Op == BO_GE || Op == BO_LE) 12039 std::swap(TrueFlag, FalseFlag); 12040 } 12041 if (R & TrueFlag) 12042 return StringRef("true"); 12043 if (R & FalseFlag) 12044 return StringRef("false"); 12045 return llvm::None; 12046 } 12047 }; 12048 } 12049 12050 static bool HasEnumType(Expr *E) { 12051 // Strip off implicit integral promotions. 12052 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12053 if (ICE->getCastKind() != CK_IntegralCast && 12054 ICE->getCastKind() != CK_NoOp) 12055 break; 12056 E = ICE->getSubExpr(); 12057 } 12058 12059 return E->getType()->isEnumeralType(); 12060 } 12061 12062 static int classifyConstantValue(Expr *Constant) { 12063 // The values of this enumeration are used in the diagnostics 12064 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12065 enum ConstantValueKind { 12066 Miscellaneous = 0, 12067 LiteralTrue, 12068 LiteralFalse 12069 }; 12070 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12071 return BL->getValue() ? 
ConstantValueKind::LiteralTrue 12072 : ConstantValueKind::LiteralFalse; 12073 return ConstantValueKind::Miscellaneous; 12074 } 12075 12076 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 12077 Expr *Constant, Expr *Other, 12078 const llvm::APSInt &Value, 12079 bool RhsConstant) { 12080 if (S.inTemplateInstantiation()) 12081 return false; 12082 12083 Expr *OriginalOther = Other; 12084 12085 Constant = Constant->IgnoreParenImpCasts(); 12086 Other = Other->IgnoreParenImpCasts(); 12087 12088 // Suppress warnings on tautological comparisons between values of the same 12089 // enumeration type. There are only two ways we could warn on this: 12090 // - If the constant is outside the range of representable values of 12091 // the enumeration. In such a case, we should warn about the cast 12092 // to enumeration type, not about the comparison. 12093 // - If the constant is the maximum / minimum in-range value. For an 12094 // enumeration type, such comparisons can be meaningful and useful. 12095 if (Constant->getType()->isEnumeralType() && 12096 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 12097 return false; 12098 12099 IntRange OtherValueRange = GetExprRange( 12100 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false); 12101 12102 QualType OtherT = Other->getType(); 12103 if (const auto *AT = OtherT->getAs<AtomicType>()) 12104 OtherT = AT->getValueType(); 12105 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT); 12106 12107 // Special case for ObjC BOOL on targets where it's a typedef for a signed char 12108 // (namely, macOS). FIXME: IntRange::forValueOfType should do this. 12109 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 12110 S.NSAPIObj->isObjCBOOLType(OtherT) && 12111 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 12112 12113 // Whether we're treating Other as being a bool because of the form of 12114 // expression despite it having another type (typically 'int' in C). 12115 bool OtherIsBooleanDespiteType = 12116 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 12117 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 12118 OtherTypeRange = OtherValueRange = IntRange::forBoolType(); 12119 12120 // Check if all values in the range of possible values of this expression 12121 // lead to the same comparison outcome. 12122 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(), 12123 Value.isUnsigned()); 12124 auto Cmp = OtherPromotedValueRange.compare(Value); 12125 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 12126 if (!Result) 12127 return false; 12128 12129 // Also consider the range determined by the type alone. This allows us to 12130 // classify the warning under the proper diagnostic group. 12131 bool TautologicalTypeCompare = false; 12132 { 12133 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(), 12134 Value.isUnsigned()); 12135 auto TypeCmp = OtherPromotedTypeRange.compare(Value); 12136 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp, 12137 RhsConstant)) { 12138 TautologicalTypeCompare = true; 12139 Cmp = TypeCmp; 12140 Result = TypeResult; 12141 } 12142 } 12143 12144 // Don't warn if the non-constant operand actually always evaluates to the 12145 // same value. 12146 if (!TautologicalTypeCompare && OtherValueRange.Width == 0) 12147 return false; 12148 12149 // Suppress the diagnostic for an in-range comparison if the constant comes 12150 // from a macro or enumerator.
We don't want to diagnose 12151 // 12152 // some_long_value <= INT_MAX 12153 // 12154 // when sizeof(int) == sizeof(long). 12155 bool InRange = Cmp & PromotedRange::InRangeFlag; 12156 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 12157 return false; 12158 12159 // A comparison of an unsigned bit-field against 0 is really a type problem, 12160 // even though at the type level the bit-field might promote to 'signed int'. 12161 if (Other->refersToBitField() && InRange && Value == 0 && 12162 Other->getType()->isUnsignedIntegerOrEnumerationType()) 12163 TautologicalTypeCompare = true; 12164 12165 // If this is a comparison to an enum constant, include that 12166 // constant in the diagnostic. 12167 const EnumConstantDecl *ED = nullptr; 12168 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 12169 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 12170 12171 // Should be enough for uint128 (39 decimal digits) 12172 SmallString<64> PrettySourceValue; 12173 llvm::raw_svector_ostream OS(PrettySourceValue); 12174 if (ED) { 12175 OS << '\'' << *ED << "' (" << Value << ")"; 12176 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 12177 Constant->IgnoreParenImpCasts())) { 12178 OS << (BL->getValue() ? "YES" : "NO"); 12179 } else { 12180 OS << Value; 12181 } 12182 12183 if (!TautologicalTypeCompare) { 12184 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 12185 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 12186 << E->getOpcodeStr() << OS.str() << *Result 12187 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12188 return true; 12189 } 12190 12191 if (IsObjCSignedCharBool) { 12192 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12193 S.PDiag(diag::warn_tautological_compare_objc_bool) 12194 << OS.str() << *Result); 12195 return true; 12196 } 12197 12198 // FIXME: We use a somewhat different formatting for the in-range cases and 12199 // cases involving boolean values for historical reasons. We should pick a 12200 // consistent way of presenting these diagnostics. 12201 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12202 12203 S.DiagRuntimeBehavior( 12204 E->getOperatorLoc(), E, 12205 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12206 : diag::warn_tautological_bool_compare) 12207 << OS.str() << classifyConstantValue(Constant) << OtherT 12208 << OtherIsBooleanDespiteType << *Result 12209 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12210 } else { 12211 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12212 unsigned Diag = 12213 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12214 ? (HasEnumType(OriginalOther) 12215 ? diag::warn_unsigned_enum_always_true_comparison 12216 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12217 : diag::warn_unsigned_always_true_comparison) 12218 : diag::warn_tautological_constant_compare; 12219 12220 S.Diag(E->getOperatorLoc(), Diag) 12221 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12222 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12223 } 12224 12225 return true; 12226 } 12227 12228 /// Analyze the operands of the given comparison. Implements the 12229 /// fallback case from AnalyzeComparison. 12230 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12231 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12232 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12233 } 12234 12235 /// Implements -Wsign-compare. 
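/// Illustrative example (not from the original source): with 'unsigned u' and
/// 'int i', the comparison 'u < i' is diagnosed under -Wsign-compare unless
/// 'i' is known to be non-negative.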
12236 /// 12237 /// \param E the binary operator to check for warnings 12238 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12239 // The type the comparison is being performed in. 12240 QualType T = E->getLHS()->getType(); 12241 12242 // Only analyze comparison operators where both sides have been converted to 12243 // the same type. 12244 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12245 return AnalyzeImpConvsInComparison(S, E); 12246 12247 // Don't analyze value-dependent comparisons directly. 12248 if (E->isValueDependent()) 12249 return AnalyzeImpConvsInComparison(S, E); 12250 12251 Expr *LHS = E->getLHS(); 12252 Expr *RHS = E->getRHS(); 12253 12254 if (T->isIntegralType(S.Context)) { 12255 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12256 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12257 12258 // We don't care about expressions whose result is a constant. 12259 if (RHSValue && LHSValue) 12260 return AnalyzeImpConvsInComparison(S, E); 12261 12262 // We only care about expressions where just one side is literal 12263 if ((bool)RHSValue ^ (bool)LHSValue) { 12264 // Is the constant on the RHS or LHS? 12265 const bool RhsConstant = (bool)RHSValue; 12266 Expr *Const = RhsConstant ? RHS : LHS; 12267 Expr *Other = RhsConstant ? LHS : RHS; 12268 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12269 12270 // Check whether an integer constant comparison results in a value 12271 // of 'true' or 'false'. 12272 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12273 return AnalyzeImpConvsInComparison(S, E); 12274 } 12275 } 12276 12277 if (!T->hasUnsignedIntegerRepresentation()) { 12278 // We don't do anything special if this isn't an unsigned integral 12279 // comparison: we're only interested in integral comparisons, and 12280 // signed comparisons only happen in cases we don't care to warn about. 12281 return AnalyzeImpConvsInComparison(S, E); 12282 } 12283 12284 LHS = LHS->IgnoreParenImpCasts(); 12285 RHS = RHS->IgnoreParenImpCasts(); 12286 12287 if (!S.getLangOpts().CPlusPlus) { 12288 // Avoid warning about comparison of integers with different signs when 12289 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12290 // the type of `E`. 12291 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12292 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12293 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12294 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12295 } 12296 12297 // Check to see if one of the (unmodified) operands is of different 12298 // signedness. 12299 Expr *signedOperand, *unsignedOperand; 12300 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12301 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12302 "unsigned comparison between two signed integer expressions?"); 12303 signedOperand = LHS; 12304 unsignedOperand = RHS; 12305 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12306 signedOperand = RHS; 12307 unsignedOperand = LHS; 12308 } else { 12309 return AnalyzeImpConvsInComparison(S, E); 12310 } 12311 12312 // Otherwise, calculate the effective range of the signed operand. 12313 IntRange signedRange = GetExprRange( 12314 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12315 12316 // Go ahead and analyze implicit conversions in the operands. Note 12317 // that we skip the implicit conversions on both sides. 
12318 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12319 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12320 12321 // If the signed range is non-negative, -Wsign-compare won't fire. 12322 if (signedRange.NonNegative) 12323 return; 12324 12325 // For (in)equality comparisons, if the unsigned operand is a 12326 // constant which cannot collide with a overflowed signed operand, 12327 // then reinterpreting the signed operand as unsigned will not 12328 // change the result of the comparison. 12329 if (E->isEqualityOp()) { 12330 unsigned comparisonWidth = S.Context.getIntWidth(T); 12331 IntRange unsignedRange = 12332 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12333 /*Approximate*/ true); 12334 12335 // We should never be unable to prove that the unsigned operand is 12336 // non-negative. 12337 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12338 12339 if (unsignedRange.Width < comparisonWidth) 12340 return; 12341 } 12342 12343 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12344 S.PDiag(diag::warn_mixed_sign_comparison) 12345 << LHS->getType() << RHS->getType() 12346 << LHS->getSourceRange() << RHS->getSourceRange()); 12347 } 12348 12349 /// Analyzes an attempt to assign the given value to a bitfield. 12350 /// 12351 /// Returns true if there was something fishy about the attempt. 12352 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12353 SourceLocation InitLoc) { 12354 assert(Bitfield->isBitField()); 12355 if (Bitfield->isInvalidDecl()) 12356 return false; 12357 12358 // White-list bool bitfields. 12359 QualType BitfieldType = Bitfield->getType(); 12360 if (BitfieldType->isBooleanType()) 12361 return false; 12362 12363 if (BitfieldType->isEnumeralType()) { 12364 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12365 // If the underlying enum type was not explicitly specified as an unsigned 12366 // type and the enum contain only positive values, MSVC++ will cause an 12367 // inconsistency by storing this as a signed type. 12368 if (S.getLangOpts().CPlusPlus11 && 12369 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12370 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12371 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12372 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12373 << BitfieldEnumDecl; 12374 } 12375 } 12376 12377 if (Bitfield->getType()->isBooleanType()) 12378 return false; 12379 12380 // Ignore value- or type-dependent expressions. 12381 if (Bitfield->getBitWidth()->isValueDependent() || 12382 Bitfield->getBitWidth()->isTypeDependent() || 12383 Init->isValueDependent() || 12384 Init->isTypeDependent()) 12385 return false; 12386 12387 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12388 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12389 12390 Expr::EvalResult Result; 12391 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12392 Expr::SE_AllowSideEffects)) { 12393 // The RHS is not constant. If the RHS has an enum type, make sure the 12394 // bitfield is wide enough to hold all the values of the enum without 12395 // truncation. 12396 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12397 EnumDecl *ED = EnumTy->getDecl(); 12398 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12399 12400 // Enum types are implicitly signed on Windows, so check if there are any 12401 // negative enumerators to see if the enum was intended to be signed or 12402 // not. 
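  // (Illustrative example, not in the original source: 'enum E { A = -1, B };'
  // has a negative enumerator and is treated as signed below, while
  // 'enum F { X, Y };' is treated as unsigned.)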
12403 bool SignedEnum = ED->getNumNegativeBits() > 0; 12404 12405 // Check for surprising sign changes when assigning enum values to a 12406 // bitfield of different signedness. If the bitfield is signed and we 12407 // have exactly the right number of bits to store this unsigned enum, 12408 // suggest changing the enum to an unsigned type. This typically happens 12409 // on Windows where unfixed enums always use an underlying type of 'int'. 12410 unsigned DiagID = 0; 12411 if (SignedEnum && !SignedBitfield) { 12412 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12413 } else if (SignedBitfield && !SignedEnum && 12414 ED->getNumPositiveBits() == FieldWidth) { 12415 DiagID = diag::warn_signed_bitfield_enum_conversion; 12416 } 12417 12418 if (DiagID) { 12419 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12420 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12421 SourceRange TypeRange = 12422 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12423 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12424 << SignedEnum << TypeRange; 12425 } 12426 12427 // Compute the required bitwidth. If the enum has negative values, we need 12428 // one more bit than the normal number of positive bits to represent the 12429 // sign bit. 12430 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12431 ED->getNumNegativeBits()) 12432 : ED->getNumPositiveBits(); 12433 12434 // Check the bitwidth. 12435 if (BitsNeeded > FieldWidth) { 12436 Expr *WidthExpr = Bitfield->getBitWidth(); 12437 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12438 << Bitfield << ED; 12439 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12440 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12441 } 12442 } 12443 12444 return false; 12445 } 12446 12447 llvm::APSInt Value = Result.Val.getInt(); 12448 12449 unsigned OriginalWidth = Value.getBitWidth(); 12450 12451 if (!Value.isSigned() || Value.isNegative()) 12452 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12453 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12454 OriginalWidth = Value.getMinSignedBits(); 12455 12456 if (OriginalWidth <= FieldWidth) 12457 return false; 12458 12459 // Compute the value which the bitfield will contain. 12460 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12461 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12462 12463 // Check whether the stored value is equal to the original value. 12464 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12465 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12466 return false; 12467 12468 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12469 // therefore don't strictly fit into a signed bitfield of width 1. 12470 if (FieldWidth == 1 && Value == 1) 12471 return false; 12472 12473 std::string PrettyValue = toString(Value, 10); 12474 std::string PrettyTrunc = toString(TruncatedValue, 10); 12475 12476 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12477 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12478 << Init->getSourceRange(); 12479 12480 return true; 12481 } 12482 12483 /// Analyze the given simple or compound assignment for warning-worthy 12484 /// operations. 12485 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12486 // Just recurse on the LHS. 
12487 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12488 12489 // We want to recurse on the RHS as normal unless we're assigning to 12490 // a bitfield. 12491 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12492 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12493 E->getOperatorLoc())) { 12494 // Recurse, ignoring any implicit conversions on the RHS. 12495 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12496 E->getOperatorLoc()); 12497 } 12498 } 12499 12500 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12501 12502 // Diagnose implicitly sequentially-consistent atomic assignment. 12503 if (E->getLHS()->getType()->isAtomicType()) 12504 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12505 } 12506 12507 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12508 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12509 SourceLocation CContext, unsigned diag, 12510 bool pruneControlFlow = false) { 12511 if (pruneControlFlow) { 12512 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12513 S.PDiag(diag) 12514 << SourceType << T << E->getSourceRange() 12515 << SourceRange(CContext)); 12516 return; 12517 } 12518 S.Diag(E->getExprLoc(), diag) 12519 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12520 } 12521 12522 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12523 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12524 SourceLocation CContext, 12525 unsigned diag, bool pruneControlFlow = false) { 12526 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12527 } 12528 12529 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12530 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12531 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12532 } 12533 12534 static void adornObjCBoolConversionDiagWithTernaryFixit( 12535 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12536 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12537 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12538 Ignored = OVE->getSourceExpr(); 12539 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12540 isa<BinaryOperator>(Ignored) || 12541 isa<CXXOperatorCallExpr>(Ignored); 12542 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12543 if (NeedsParens) 12544 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12545 << FixItHint::CreateInsertion(EndLoc, ")"); 12546 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12547 } 12548 12549 /// Diagnose an implicit cast from a floating point value to an integer value. 
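/// Illustrative example (not from the original source): 'int i = 2.5;' is
/// diagnosed because the value changes from 2.5 to 2, whereas 'int i = 2.0;'
/// converts exactly and, being a literal, is not diagnosed.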
12550 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 12551 SourceLocation CContext) { 12552 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 12553 const bool PruneWarnings = S.inTemplateInstantiation(); 12554 12555 Expr *InnerE = E->IgnoreParenImpCasts(); 12556 // We also want to warn on, e.g., "int i = -1.234" 12557 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 12558 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 12559 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 12560 12561 const bool IsLiteral = 12562 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 12563 12564 llvm::APFloat Value(0.0); 12565 bool IsConstant = 12566 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 12567 if (!IsConstant) { 12568 if (isObjCSignedCharBool(S, T)) { 12569 return adornObjCBoolConversionDiagWithTernaryFixit( 12570 S, E, 12571 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 12572 << E->getType()); 12573 } 12574 12575 return DiagnoseImpCast(S, E, T, CContext, 12576 diag::warn_impcast_float_integer, PruneWarnings); 12577 } 12578 12579 bool isExact = false; 12580 12581 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 12582 T->hasUnsignedIntegerRepresentation()); 12583 llvm::APFloat::opStatus Result = Value.convertToInteger( 12584 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 12585 12586 // FIXME: Force the precision of the source value down so we don't print 12587 // digits which are usually useless (we don't really care here if we 12588 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 12589 // would automatically print the shortest representation, but it's a bit 12590 // tricky to implement. 12591 SmallString<16> PrettySourceValue; 12592 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 12593 precision = (precision * 59 + 195) / 196; 12594 Value.toString(PrettySourceValue, precision); 12595 12596 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 12597 return adornObjCBoolConversionDiagWithTernaryFixit( 12598 S, E, 12599 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 12600 << PrettySourceValue); 12601 } 12602 12603 if (Result == llvm::APFloat::opOK && isExact) { 12604 if (IsLiteral) return; 12605 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 12606 PruneWarnings); 12607 } 12608 12609 // Conversion of a floating-point value to a non-bool integer where the 12610 // integral part cannot be represented by the integer type is undefined. 12611 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 12612 return DiagnoseImpCast( 12613 S, E, T, CContext, 12614 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 12615 : diag::warn_impcast_float_to_integer_out_of_range, 12616 PruneWarnings); 12617 12618 unsigned DiagID = 0; 12619 if (IsLiteral) { 12620 // Warn on floating point literal to integer. 12621 DiagID = diag::warn_impcast_literal_float_to_integer; 12622 } else if (IntegerValue == 0) { 12623 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 12624 return DiagnoseImpCast(S, E, T, CContext, 12625 diag::warn_impcast_float_integer, PruneWarnings); 12626 } 12627 // Warn on non-zero to zero conversion. 
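    // (Illustrative example, not in the original source: 'int i = 1.0 / 3.0;'
    // evaluates to roughly 0.333 and becomes 0 here.)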
12628 DiagID = diag::warn_impcast_float_to_integer_zero; 12629 } else { 12630 if (IntegerValue.isUnsigned()) { 12631 if (!IntegerValue.isMaxValue()) { 12632 return DiagnoseImpCast(S, E, T, CContext, 12633 diag::warn_impcast_float_integer, PruneWarnings); 12634 } 12635 } else { // IntegerValue.isSigned() 12636 if (!IntegerValue.isMaxSignedValue() && 12637 !IntegerValue.isMinSignedValue()) { 12638 return DiagnoseImpCast(S, E, T, CContext, 12639 diag::warn_impcast_float_integer, PruneWarnings); 12640 } 12641 } 12642 // Warn on evaluatable floating point expression to integer conversion. 12643 DiagID = diag::warn_impcast_float_to_integer; 12644 } 12645 12646 SmallString<16> PrettyTargetValue; 12647 if (IsBool) 12648 PrettyTargetValue = Value.isZero() ? "false" : "true"; 12649 else 12650 IntegerValue.toString(PrettyTargetValue); 12651 12652 if (PruneWarnings) { 12653 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12654 S.PDiag(DiagID) 12655 << E->getType() << T.getUnqualifiedType() 12656 << PrettySourceValue << PrettyTargetValue 12657 << E->getSourceRange() << SourceRange(CContext)); 12658 } else { 12659 S.Diag(E->getExprLoc(), DiagID) 12660 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 12661 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 12662 } 12663 } 12664 12665 /// Analyze the given compound assignment for the possible losing of 12666 /// floating-point precision. 12667 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 12668 assert(isa<CompoundAssignOperator>(E) && 12669 "Must be compound assignment operation"); 12670 // Recurse on the LHS and RHS in here 12671 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12672 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12673 12674 if (E->getLHS()->getType()->isAtomicType()) 12675 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 12676 12677 // Now check the outermost expression 12678 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 12679 const auto *RBT = cast<CompoundAssignOperator>(E) 12680 ->getComputationResultType() 12681 ->getAs<BuiltinType>(); 12682 12683 // The below checks assume source is floating point. 12684 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 12685 12686 // If source is floating point but target is an integer. 12687 if (ResultBT->isInteger()) 12688 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 12689 E->getExprLoc(), diag::warn_impcast_float_integer); 12690 12691 if (!ResultBT->isFloatingPoint()) 12692 return; 12693 12694 // If both source and target are floating points, warn about losing precision. 12695 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12696 QualType(ResultBT, 0), QualType(RBT, 0)); 12697 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 12698 // warn about dropping FP rank. 
12699 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 12700 diag::warn_impcast_float_result_precision); 12701 } 12702 12703 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 12704 IntRange Range) { 12705 if (!Range.Width) return "0"; 12706 12707 llvm::APSInt ValueInRange = Value; 12708 ValueInRange.setIsSigned(!Range.NonNegative); 12709 ValueInRange = ValueInRange.trunc(Range.Width); 12710 return toString(ValueInRange, 10); 12711 } 12712 12713 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 12714 if (!isa<ImplicitCastExpr>(Ex)) 12715 return false; 12716 12717 Expr *InnerE = Ex->IgnoreParenImpCasts(); 12718 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 12719 const Type *Source = 12720 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 12721 if (Target->isDependentType()) 12722 return false; 12723 12724 const BuiltinType *FloatCandidateBT = 12725 dyn_cast<BuiltinType>(ToBool ? Source : Target); 12726 const Type *BoolCandidateType = ToBool ? Target : Source; 12727 12728 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 12729 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 12730 } 12731 12732 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12733 SourceLocation CC) { 12734 unsigned NumArgs = TheCall->getNumArgs(); 12735 for (unsigned i = 0; i < NumArgs; ++i) { 12736 Expr *CurrA = TheCall->getArg(i); 12737 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12738 continue; 12739 12740 bool IsSwapped = ((i > 0) && 12741 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12742 IsSwapped |= ((i < (NumArgs - 1)) && 12743 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12744 if (IsSwapped) { 12745 // Warn on this floating-point to bool conversion. 12746 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12747 CurrA->getType(), CC, 12748 diag::warn_impcast_floating_point_to_bool); 12749 } 12750 } 12751 } 12752 12753 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12754 SourceLocation CC) { 12755 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12756 E->getExprLoc())) 12757 return; 12758 12759 // Don't warn on functions which have return type nullptr_t. 12760 if (isa<CallExpr>(E)) 12761 return; 12762 12763 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12764 const Expr::NullPointerConstantKind NullKind = 12765 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12766 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12767 return; 12768 12769 // Return if target type is a safe conversion. 12770 if (T->isAnyPointerType() || T->isBlockPointerType() || 12771 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12772 return; 12773 12774 SourceLocation Loc = E->getSourceRange().getBegin(); 12775 12776 // Venture through the macro stacks to get to the source of macro arguments. 12777 // The new location is a better location than the complete location that was 12778 // passed in. 12779 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12780 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12781 12782 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
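  // (Illustrative example, not in the original source: for 'int x = NULL;'
  // with '#define NULL __null', the diagnostic should point at the 'NULL'
  // spelling rather than at '__null'.)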
12783 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12784 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12785 Loc, S.SourceMgr, S.getLangOpts()); 12786 if (MacroName == "NULL") 12787 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12788 } 12789 12790 // Only warn if the null and context location are in the same macro expansion. 12791 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12792 return; 12793 12794 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12795 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12796 << FixItHint::CreateReplacement(Loc, 12797 S.getFixItZeroLiteralForType(T, Loc)); 12798 } 12799 12800 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12801 ObjCArrayLiteral *ArrayLiteral); 12802 12803 static void 12804 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12805 ObjCDictionaryLiteral *DictionaryLiteral); 12806 12807 /// Check a single element within a collection literal against the 12808 /// target element type. 12809 static void checkObjCCollectionLiteralElement(Sema &S, 12810 QualType TargetElementType, 12811 Expr *Element, 12812 unsigned ElementKind) { 12813 // Skip a bitcast to 'id' or qualified 'id'. 12814 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12815 if (ICE->getCastKind() == CK_BitCast && 12816 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12817 Element = ICE->getSubExpr(); 12818 } 12819 12820 QualType ElementType = Element->getType(); 12821 ExprResult ElementResult(Element); 12822 if (ElementType->getAs<ObjCObjectPointerType>() && 12823 S.CheckSingleAssignmentConstraints(TargetElementType, 12824 ElementResult, 12825 false, false) 12826 != Sema::Compatible) { 12827 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12828 << ElementType << ElementKind << TargetElementType 12829 << Element->getSourceRange(); 12830 } 12831 12832 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12833 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12834 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12835 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12836 } 12837 12838 /// Check an Objective-C array literal being converted to the given 12839 /// target type. 12840 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12841 ObjCArrayLiteral *ArrayLiteral) { 12842 if (!S.NSArrayDecl) 12843 return; 12844 12845 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12846 if (!TargetObjCPtr) 12847 return; 12848 12849 if (TargetObjCPtr->isUnspecialized() || 12850 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12851 != S.NSArrayDecl->getCanonicalDecl()) 12852 return; 12853 12854 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12855 if (TypeArgs.size() != 1) 12856 return; 12857 12858 QualType TargetElementType = TypeArgs[0]; 12859 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12860 checkObjCCollectionLiteralElement(S, TargetElementType, 12861 ArrayLiteral->getElement(I), 12862 0); 12863 } 12864 } 12865 12866 /// Check an Objective-C dictionary literal being converted to the given 12867 /// target type. 
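/// Illustrative example (assumption, not from the original source): for a
/// target of 'NSDictionary<NSString *, NSNumber *> *', the literal
/// '@{ @"a" : @"b" }' is diagnosed because the value '@"b"' is not an
/// 'NSNumber *'.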
12868 static void 12869 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12870 ObjCDictionaryLiteral *DictionaryLiteral) { 12871 if (!S.NSDictionaryDecl) 12872 return; 12873 12874 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12875 if (!TargetObjCPtr) 12876 return; 12877 12878 if (TargetObjCPtr->isUnspecialized() || 12879 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12880 != S.NSDictionaryDecl->getCanonicalDecl()) 12881 return; 12882 12883 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12884 if (TypeArgs.size() != 2) 12885 return; 12886 12887 QualType TargetKeyType = TypeArgs[0]; 12888 QualType TargetObjectType = TypeArgs[1]; 12889 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 12890 auto Element = DictionaryLiteral->getKeyValueElement(I); 12891 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 12892 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 12893 } 12894 } 12895 12896 // Helper function to filter out cases for constant width constant conversion. 12897 // Don't warn on char array initialization or for non-decimal values. 12898 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 12899 SourceLocation CC) { 12900 // If initializing from a constant, and the constant starts with '0', 12901 // then it is a binary, octal, or hexadecimal constant. Allow these constants 12902 // to fill all the bits, even if there is a sign change. 12903 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 12904 const char FirstLiteralCharacter = 12905 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 12906 if (FirstLiteralCharacter == '0') 12907 return false; 12908 } 12909 12910 // If the CC location points to a '{', and the type is char, then assume 12911 // it is an array initialization. 12912 if (CC.isValid() && T->isCharType()) { 12913 const char FirstContextCharacter = 12914 S.getSourceManager().getCharacterData(CC)[0]; 12915 if (FirstContextCharacter == '{') 12916 return false; 12917 } 12918 12919 return true; 12920 } 12921 12922 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 12923 const auto *IL = dyn_cast<IntegerLiteral>(E); 12924 if (!IL) { 12925 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 12926 if (UO->getOpcode() == UO_Minus) 12927 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 12928 } 12929 } 12930 12931 return IL; 12932 } 12933 12934 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 12935 E = E->IgnoreParenImpCasts(); 12936 SourceLocation ExprLoc = E->getExprLoc(); 12937 12938 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12939 BinaryOperator::Opcode Opc = BO->getOpcode(); 12940 Expr::EvalResult Result; 12941 // Do not diagnose unsigned shifts.
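    // (Illustrative example, not in the original source: a condition such as
    // 'if (x << 4)' with a signed 'x' may be diagnosed below, while an
    // unsigned shift is deliberately left alone.)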
12942 if (Opc == BO_Shl) { 12943 const auto *LHS = getIntegerLiteral(BO->getLHS()); 12944 const auto *RHS = getIntegerLiteral(BO->getRHS()); 12945 if (LHS && LHS->getValue() == 0) 12946 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 12947 else if (!E->isValueDependent() && LHS && RHS && 12948 RHS->getValue().isNonNegative() && 12949 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 12950 S.Diag(ExprLoc, diag::warn_left_shift_always) 12951 << (Result.Val.getInt() != 0); 12952 else if (E->getType()->isSignedIntegerType()) 12953 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 12954 } 12955 } 12956 12957 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12958 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 12959 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 12960 if (!LHS || !RHS) 12961 return; 12962 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 12963 (RHS->getValue() == 0 || RHS->getValue() == 1)) 12964 // Do not diagnose common idioms. 12965 return; 12966 if (LHS->getValue() != 0 && RHS->getValue() != 0) 12967 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 12968 } 12969 } 12970 12971 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 12972 SourceLocation CC, 12973 bool *ICContext = nullptr, 12974 bool IsListInit = false) { 12975 if (E->isTypeDependent() || E->isValueDependent()) return; 12976 12977 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 12978 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 12979 if (Source == Target) return; 12980 if (Target->isDependentType()) return; 12981 12982 // If the conversion context location is invalid, don't complain. We also 12983 // don't want to emit a warning if the issue occurs from the expansion of 12984 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 12985 // delay this check as long as possible. Once we detect we are in that 12986 // scenario, we just return. 12987 if (CC.isInvalid()) 12988 return; 12989 12990 if (Source->isAtomicType()) 12991 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 12992 12993 // Diagnose implicit casts to bool. 12994 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 12995 if (isa<StringLiteral>(E)) 12996 // Warn on string literal to bool. Checks for string literals in logical 12997 // and expressions, for instance, assert(0 && "error here"), are 12998 // prevented by a check in AnalyzeImplicitConversions(). 12999 return DiagnoseImpCast(S, E, T, CC, 13000 diag::warn_impcast_string_literal_to_bool); 13001 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13002 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13003 // This covers the literal expressions that evaluate to Objective-C 13004 // objects. 13005 return DiagnoseImpCast(S, E, T, CC, 13006 diag::warn_impcast_objective_c_literal_to_bool); 13007 } 13008 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13009 // Warn on pointer to bool conversion that is always true. 13010 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13011 SourceRange(CC)); 13012 } 13013 } 13014 13015 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 13016 // is a typedef for signed char (macOS), then that constant value has to be 1 13017 // or 0.
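  // (Illustrative example, not in the original source: on such targets,
  // 'BOOL b = 2;' is diagnosed and the fix-it suggests appending '? YES : NO'.)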
13018 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13019 Expr::EvalResult Result; 13020 if (E->EvaluateAsInt(Result, S.getASTContext(), 13021 Expr::SE_AllowSideEffects)) { 13022 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13023 adornObjCBoolConversionDiagWithTernaryFixit( 13024 S, E, 13025 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13026 << toString(Result.Val.getInt(), 10)); 13027 } 13028 return; 13029 } 13030 } 13031 13032 // Check implicit casts from Objective-C collection literals to specialized 13033 // collection types, e.g., NSArray<NSString *> *. 13034 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13035 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13036 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13037 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13038 13039 // Strip vector types. 13040 if (isa<VectorType>(Source)) { 13041 if (Target->isVLSTBuiltinType() && 13042 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13043 QualType(Source, 0)) || 13044 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13045 QualType(Source, 0)))) 13046 return; 13047 13048 if (!isa<VectorType>(Target)) { 13049 if (S.SourceMgr.isInSystemMacro(CC)) 13050 return; 13051 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13052 } 13053 13054 // If the vector cast is cast between two vectors of the same size, it is 13055 // a bitcast, not a conversion. 13056 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13057 return; 13058 13059 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13060 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13061 } 13062 if (auto VecTy = dyn_cast<VectorType>(Target)) 13063 Target = VecTy->getElementType().getTypePtr(); 13064 13065 // Strip complex types. 13066 if (isa<ComplexType>(Source)) { 13067 if (!isa<ComplexType>(Target)) { 13068 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13069 return; 13070 13071 return DiagnoseImpCast(S, E, T, CC, 13072 S.getLangOpts().CPlusPlus 13073 ? diag::err_impcast_complex_scalar 13074 : diag::warn_impcast_complex_scalar); 13075 } 13076 13077 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13078 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13079 } 13080 13081 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13082 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13083 13084 // If the source is floating point... 13085 if (SourceBT && SourceBT->isFloatingPoint()) { 13086 // ...and the target is floating point... 13087 if (TargetBT && TargetBT->isFloatingPoint()) { 13088 // ...then warn if we're dropping FP rank. 13089 13090 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13091 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13092 if (Order > 0) { 13093 // Don't warn about float constants that are precisely 13094 // representable in the target type. 13095 Expr::EvalResult result; 13096 if (E->EvaluateAsRValue(result, S.Context)) { 13097 // Value might be a float, a float vector, or a float complex. 13098 if (IsSameFloatAfterCast(result.Val, 13099 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13100 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13101 return; 13102 } 13103 13104 if (S.SourceMgr.isInSystemMacro(CC)) 13105 return; 13106 13107 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13108 } 13109 // ... 
or possibly if we're increasing rank, too 13110 else if (Order < 0) { 13111 if (S.SourceMgr.isInSystemMacro(CC)) 13112 return; 13113 13114 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13115 } 13116 return; 13117 } 13118 13119 // If the target is integral, always warn. 13120 if (TargetBT && TargetBT->isInteger()) { 13121 if (S.SourceMgr.isInSystemMacro(CC)) 13122 return; 13123 13124 DiagnoseFloatingImpCast(S, E, T, CC); 13125 } 13126 13127 // Detect the case where a call result is converted from floating-point to 13128 // to bool, and the final argument to the call is converted from bool, to 13129 // discover this typo: 13130 // 13131 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13132 // 13133 // FIXME: This is an incredibly special case; is there some more general 13134 // way to detect this class of misplaced-parentheses bug? 13135 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13136 // Check last argument of function call to see if it is an 13137 // implicit cast from a type matching the type the result 13138 // is being cast to. 13139 CallExpr *CEx = cast<CallExpr>(E); 13140 if (unsigned NumArgs = CEx->getNumArgs()) { 13141 Expr *LastA = CEx->getArg(NumArgs - 1); 13142 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13143 if (isa<ImplicitCastExpr>(LastA) && 13144 InnerE->getType()->isBooleanType()) { 13145 // Warn on this floating-point to bool conversion 13146 DiagnoseImpCast(S, E, T, CC, 13147 diag::warn_impcast_floating_point_to_bool); 13148 } 13149 } 13150 } 13151 return; 13152 } 13153 13154 // Valid casts involving fixed point types should be accounted for here. 13155 if (Source->isFixedPointType()) { 13156 if (Target->isUnsaturatedFixedPointType()) { 13157 Expr::EvalResult Result; 13158 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13159 S.isConstantEvaluated())) { 13160 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13161 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13162 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13163 if (Value > MaxVal || Value < MinVal) { 13164 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13165 S.PDiag(diag::warn_impcast_fixed_point_range) 13166 << Value.toString() << T 13167 << E->getSourceRange() 13168 << clang::SourceRange(CC)); 13169 return; 13170 } 13171 } 13172 } else if (Target->isIntegerType()) { 13173 Expr::EvalResult Result; 13174 if (!S.isConstantEvaluated() && 13175 E->EvaluateAsFixedPoint(Result, S.Context, 13176 Expr::SE_AllowSideEffects)) { 13177 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13178 13179 bool Overflowed; 13180 llvm::APSInt IntResult = FXResult.convertToInt( 13181 S.Context.getIntWidth(T), 13182 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13183 13184 if (Overflowed) { 13185 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13186 S.PDiag(diag::warn_impcast_fixed_point_range) 13187 << FXResult.toString() << T 13188 << E->getSourceRange() 13189 << clang::SourceRange(CC)); 13190 return; 13191 } 13192 } 13193 } 13194 } else if (Target->isUnsaturatedFixedPointType()) { 13195 if (Source->isIntegerType()) { 13196 Expr::EvalResult Result; 13197 if (!S.isConstantEvaluated() && 13198 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13199 llvm::APSInt Value = Result.Val.getInt(); 13200 13201 bool Overflowed; 13202 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13203 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13204 13205 if (Overflowed) { 13206 
S.DiagRuntimeBehavior(E->getExprLoc(), E, 13207 S.PDiag(diag::warn_impcast_fixed_point_range) 13208 << toString(Value, /*Radix=*/10) << T 13209 << E->getSourceRange() 13210 << clang::SourceRange(CC)); 13211 return; 13212 } 13213 } 13214 } 13215 } 13216 13217 // If we are casting an integer type to a floating point type without 13218 // initialization-list syntax, we might lose accuracy if the floating 13219 // point type has a narrower significand than the integer type. 13220 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13221 TargetBT->isFloatingType() && !IsListInit) { 13222 // Determine the number of precision bits in the source integer type. 13223 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13224 /*Approximate*/ true); 13225 unsigned int SourcePrecision = SourceRange.Width; 13226 13227 // Determine the number of precision bits in the 13228 // target floating point type. 13229 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13230 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13231 13232 if (SourcePrecision > 0 && TargetPrecision > 0 && 13233 SourcePrecision > TargetPrecision) { 13234 13235 if (Optional<llvm::APSInt> SourceInt = 13236 E->getIntegerConstantExpr(S.Context)) { 13237 // If the source integer is a constant, convert it to the target 13238 // floating point type. Issue a warning if the value changes 13239 // during the whole conversion. 13240 llvm::APFloat TargetFloatValue( 13241 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13242 llvm::APFloat::opStatus ConversionStatus = 13243 TargetFloatValue.convertFromAPInt( 13244 *SourceInt, SourceBT->isSignedInteger(), 13245 llvm::APFloat::rmNearestTiesToEven); 13246 13247 if (ConversionStatus != llvm::APFloat::opOK) { 13248 SmallString<32> PrettySourceValue; 13249 SourceInt->toString(PrettySourceValue, 10); 13250 SmallString<32> PrettyTargetValue; 13251 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13252 13253 S.DiagRuntimeBehavior( 13254 E->getExprLoc(), E, 13255 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13256 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13257 << E->getSourceRange() << clang::SourceRange(CC)); 13258 } 13259 } else { 13260 // Otherwise, the implicit conversion may lose precision. 13261 DiagnoseImpCast(S, E, T, CC, 13262 diag::warn_impcast_integer_float_precision); 13263 } 13264 } 13265 } 13266 13267 DiagnoseNullConversion(S, E, T, CC); 13268 13269 S.DiscardMisalignedMemberAddress(Target, E); 13270 13271 if (Target->isBooleanType()) 13272 DiagnoseIntInBoolContext(S, E); 13273 13274 if (!Source->isIntegerType() || !Target->isIntegerType()) 13275 return; 13276 13277 // TODO: remove this early return once the false positives for constant->bool 13278 // in templates, macros, etc, are reduced or removed. 
13279 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13280 return; 13281 13282 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13283 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13284 return adornObjCBoolConversionDiagWithTernaryFixit( 13285 S, E, 13286 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13287 << E->getType()); 13288 } 13289 13290 IntRange SourceTypeRange = 13291 IntRange::forTargetOfCanonicalType(S.Context, Source); 13292 IntRange LikelySourceRange = 13293 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13294 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13295 13296 if (LikelySourceRange.Width > TargetRange.Width) { 13297 // If the source is a constant, use a default-on diagnostic. 13298 // TODO: this should happen for bitfield stores, too. 13299 Expr::EvalResult Result; 13300 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13301 S.isConstantEvaluated())) { 13302 llvm::APSInt Value(32); 13303 Value = Result.Val.getInt(); 13304 13305 if (S.SourceMgr.isInSystemMacro(CC)) 13306 return; 13307 13308 std::string PrettySourceValue = toString(Value, 10); 13309 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13310 13311 S.DiagRuntimeBehavior( 13312 E->getExprLoc(), E, 13313 S.PDiag(diag::warn_impcast_integer_precision_constant) 13314 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13315 << E->getSourceRange() << SourceRange(CC)); 13316 return; 13317 } 13318 13319 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 13320 if (S.SourceMgr.isInSystemMacro(CC)) 13321 return; 13322 13323 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13324 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13325 /* pruneControlFlow */ true); 13326 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13327 } 13328 13329 if (TargetRange.Width > SourceTypeRange.Width) { 13330 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13331 if (UO->getOpcode() == UO_Minus) 13332 if (Source->isUnsignedIntegerType()) { 13333 if (Target->isUnsignedIntegerType()) 13334 return DiagnoseImpCast(S, E, T, CC, 13335 diag::warn_impcast_high_order_zero_bits); 13336 if (Target->isSignedIntegerType()) 13337 return DiagnoseImpCast(S, E, T, CC, 13338 diag::warn_impcast_nonnegative_result); 13339 } 13340 } 13341 13342 if (TargetRange.Width == LikelySourceRange.Width && 13343 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13344 Source->isSignedIntegerType()) { 13345 // Warn when doing a signed to signed conversion, warn if the positive 13346 // source value is exactly the width of the target type, which will 13347 // cause a negative value to be stored. 13348 13349 Expr::EvalResult Result; 13350 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13351 !S.SourceMgr.isInSystemMacro(CC)) { 13352 llvm::APSInt Value = Result.Val.getInt(); 13353 if (isSameWidthConstantConversion(S, E, T, CC)) { 13354 std::string PrettySourceValue = toString(Value, 10); 13355 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13356 13357 S.DiagRuntimeBehavior( 13358 E->getExprLoc(), E, 13359 S.PDiag(diag::warn_impcast_integer_precision_constant) 13360 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13361 << E->getSourceRange() << SourceRange(CC)); 13362 return; 13363 } 13364 } 13365 13366 // Fall through for non-constants to give a sign conversion warning. 
13367 } 13368 13369 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13370 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13371 LikelySourceRange.Width == TargetRange.Width)) { 13372 if (S.SourceMgr.isInSystemMacro(CC)) 13373 return; 13374 13375 unsigned DiagID = diag::warn_impcast_integer_sign; 13376 13377 // Traditionally, gcc has warned about this under -Wsign-compare. 13378 // We also want to warn about it in -Wconversion. 13379 // So if -Wconversion is off, use a completely identical diagnostic 13380 // in the sign-compare group. 13381 // The conditional-checking code will later decide which one to use. 13382 if (ICContext) { 13383 DiagID = diag::warn_impcast_integer_sign_conditional; 13384 *ICContext = true; 13385 } 13386 13387 return DiagnoseImpCast(S, E, T, CC, DiagID); 13388 } 13389 13390 // Diagnose conversions between different enumeration types. 13391 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13392 // type, to give us better diagnostics. 13393 QualType SourceType = E->getType(); 13394 if (!S.getLangOpts().CPlusPlus) { 13395 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13396 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13397 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13398 SourceType = S.Context.getTypeDeclType(Enum); 13399 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13400 } 13401 } 13402 13403 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13404 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13405 if (SourceEnum->getDecl()->hasNameForLinkage() && 13406 TargetEnum->getDecl()->hasNameForLinkage() && 13407 SourceEnum != TargetEnum) { 13408 if (S.SourceMgr.isInSystemMacro(CC)) 13409 return; 13410 13411 return DiagnoseImpCast(S, E, SourceType, T, CC, 13412 diag::warn_impcast_different_enum_types); 13413 } 13414 } 13415 13416 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13417 SourceLocation CC, QualType T); 13418 13419 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13420 SourceLocation CC, bool &ICContext) { 13421 E = E->IgnoreParenImpCasts(); 13422 13423 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13424 return CheckConditionalOperator(S, CO, CC, T); 13425 13426 AnalyzeImplicitConversions(S, E, CC); 13427 if (E->getType() != T) 13428 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13429 } 13430 13431 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13432 SourceLocation CC, QualType T) { 13433 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13434 13435 Expr *TrueExpr = E->getTrueExpr(); 13436 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13437 TrueExpr = BCO->getCommon(); 13438 13439 bool Suspicious = false; 13440 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13441 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13442 13443 if (T->isBooleanType()) 13444 DiagnoseIntInBoolContext(S, E); 13445 13446 // If -Wconversion would have warned about either of the candidates 13447 // for a signedness conversion to the context type... 13448 if (!Suspicious) return; 13449 13450 // ...but it's currently ignored... 13451 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13452 return; 13453 13454 // ...then check whether it would have warned about either of the 13455 // candidates for a signedness conversion to the condition type.
13456 if (E->getType() == T) return; 13457 13458 Suspicious = false; 13459 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13460 E->getType(), CC, &Suspicious); 13461 if (!Suspicious) 13462 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13463 E->getType(), CC, &Suspicious); 13464 } 13465 13466 /// Check conversion of given expression to boolean. 13467 /// Input argument E is a logical expression. 13468 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13469 if (S.getLangOpts().Bool) 13470 return; 13471 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13472 return; 13473 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13474 } 13475 13476 namespace { 13477 struct AnalyzeImplicitConversionsWorkItem { 13478 Expr *E; 13479 SourceLocation CC; 13480 bool IsListInit; 13481 }; 13482 } 13483 13484 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13485 /// that should be visited are added to WorkList. 13486 static void AnalyzeImplicitConversions( 13487 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13488 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13489 Expr *OrigE = Item.E; 13490 SourceLocation CC = Item.CC; 13491 13492 QualType T = OrigE->getType(); 13493 Expr *E = OrigE->IgnoreParenImpCasts(); 13494 13495 // Propagate whether we are in a C++ list initialization expression. 13496 // If so, we do not issue warnings for implicit int-float conversion 13497 // precision loss, because C++11 narrowing already handles it. 13498 bool IsListInit = Item.IsListInit || 13499 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13500 13501 if (E->isTypeDependent() || E->isValueDependent()) 13502 return; 13503 13504 Expr *SourceExpr = E; 13505 // Examine, but don't traverse into the source expression of an 13506 // OpaqueValueExpr, since it may have multiple parents and we don't want to 13507 // emit duplicate diagnostics. It's fine to examine the form or attempt to 13508 // evaluate it in the context of checking the specific conversion to T though. 13509 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13510 if (auto *Src = OVE->getSourceExpr()) 13511 SourceExpr = Src; 13512 13513 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 13514 if (UO->getOpcode() == UO_Not && 13515 UO->getSubExpr()->isKnownToHaveBooleanValue()) 13516 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 13517 << OrigE->getSourceRange() << T->isBooleanType() 13518 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 13519 13520 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 13521 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 13522 BO->getLHS()->isKnownToHaveBooleanValue() && 13523 BO->getRHS()->isKnownToHaveBooleanValue() && 13524 BO->getLHS()->HasSideEffects(S.Context) && 13525 BO->getRHS()->HasSideEffects(S.Context)) { 13526 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 13527 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 13528 << FixItHint::CreateReplacement( 13529 BO->getOperatorLoc(), 13530 (BO->getOpcode() == BO_And ? "&&" : "||")); 13531 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 13532 } 13533 13534 // For conditional operators, we analyze the arguments as if they 13535 // were being fed directly into the output.
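  // (Illustrative example, not in the original source: in
  // 'unsigned u = c ? -1 : n;' each arm is checked against 'unsigned' as if
  // it were assigned directly.)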
13536 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 13537 CheckConditionalOperator(S, CO, CC, T); 13538 return; 13539 } 13540 13541 // Check implicit argument conversions for function calls. 13542 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 13543 CheckImplicitArgumentConversions(S, Call, CC); 13544 13545 // Go ahead and check any implicit conversions we might have skipped. 13546 // The non-canonical typecheck is just an optimization; 13547 // CheckImplicitConversion will filter out dead implicit conversions. 13548 if (SourceExpr->getType() != T) 13549 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 13550 13551 // Now continue drilling into this expression. 13552 13553 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 13554 // The bound subexpressions in a PseudoObjectExpr are not reachable 13555 // as transitive children. 13556 // FIXME: Use a more uniform representation for this. 13557 for (auto *SE : POE->semantics()) 13558 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 13559 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 13560 } 13561 13562 // Skip past explicit casts. 13563 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 13564 E = CE->getSubExpr()->IgnoreParenImpCasts(); 13565 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 13566 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13567 WorkList.push_back({E, CC, IsListInit}); 13568 return; 13569 } 13570 13571 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13572 // Do a somewhat different check with comparison operators. 13573 if (BO->isComparisonOp()) 13574 return AnalyzeComparison(S, BO); 13575 13576 // And with simple assignments. 13577 if (BO->getOpcode() == BO_Assign) 13578 return AnalyzeAssignment(S, BO); 13579 // And with compound assignments. 13580 if (BO->isAssignmentOp()) 13581 return AnalyzeCompoundAssignment(S, BO); 13582 } 13583 13584 // These break the otherwise-useful invariant below. Fortunately, 13585 // we don't really need to recurse into them, because any internal 13586 // expressions should have been analyzed already when they were 13587 // built into statements. 13588 if (isa<StmtExpr>(E)) return; 13589 13590 // Don't descend into unevaluated contexts. 13591 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 13592 13593 // Now just recurse over the expression's children. 13594 CC = E->getExprLoc(); 13595 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 13596 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 13597 for (Stmt *SubStmt : E->children()) { 13598 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 13599 if (!ChildExpr) 13600 continue; 13601 13602 if (IsLogicalAndOperator && 13603 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 13604 // Ignore checking string literals that are in logical and operators. 13605 // This is a common pattern for asserts. 
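      // (Illustrative: the string literal in 'assert(p && "p must not be null")'
      // is used only for its truthiness, so it is deliberately skipped here.)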
13606 continue;
13607 WorkList.push_back({ChildExpr, CC, IsListInit});
13608 }
13609
13610 if (BO && BO->isLogicalOp()) {
13611 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
13612 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
13613 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
13614
13615 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
13616 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
13617 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
13618 }
13619
13620 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
13621 if (U->getOpcode() == UO_LNot) {
13622 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
13623 } else if (U->getOpcode() != UO_AddrOf) {
13624 if (U->getSubExpr()->getType()->isAtomicType())
13625 S.Diag(U->getSubExpr()->getBeginLoc(),
13626 diag::warn_atomic_implicit_seq_cst);
13627 }
13628 }
13629 }
13630
13631 /// AnalyzeImplicitConversions - Find and report any interesting
13632 /// implicit conversions in the given expression. There are a couple
13633 /// of competing diagnostics here, -Wconversion and -Wsign-compare.
13634 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
13635 bool IsListInit/*= false*/) {
13636 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
13637 WorkList.push_back({OrigE, CC, IsListInit});
13638 while (!WorkList.empty())
13639 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
13640 }
13641
13642 /// Diagnose integer type and any valid implicit conversion to it.
13643 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
13644 // Taking into account implicit conversions,
13645 // allow any integer.
13646 if (!E->getType()->isIntegerType()) {
13647 S.Diag(E->getBeginLoc(),
13648 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
13649 return true;
13650 }
13651 // Potentially emit standard warnings for implicit conversions if enabled
13652 // using -Wconversion.
13653 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
13654 return false;
13655 }
13656
13657 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
13658 // Returns true when emitting a warning about taking the address of a reference.
13659 static bool CheckForReference(Sema &SemaRef, const Expr *E,
13660 const PartialDiagnostic &PD) {
13661 E = E->IgnoreParenImpCasts();
13662
13663 const FunctionDecl *FD = nullptr;
13664
13665 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
13666 if (!DRE->getDecl()->getType()->isReferenceType())
13667 return false;
13668 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
13669 if (!M->getMemberDecl()->getType()->isReferenceType())
13670 return false;
13671 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
13672 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
13673 return false;
13674 FD = Call->getDirectCallee();
13675 } else {
13676 return false;
13677 }
13678
13679 SemaRef.Diag(E->getExprLoc(), PD);
13680
13681 // If possible, point to location of function.
13682 if (FD) {
13683 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
13684 }
13685
13686 return true;
13687 }
13688
13689 // Returns true if the SourceLocation is expanded from any macro body.
13690 // Returns false if the SourceLocation is invalid, is not in a macro
13691 // expansion, or is expanded from a top-level macro argument.
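// For example (illustrative only):
//
//   #define CHECK(p) ((p) != nullptr)
//   CHECK(&ref)
//
// A location spelled inside the replacement text of CHECK is a macro *body*
// expansion and yields true, while the location of '&ref', a top-level macro
// argument at the call site, yields false.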
13692 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 13693 if (Loc.isInvalid()) 13694 return false; 13695 13696 while (Loc.isMacroID()) { 13697 if (SM.isMacroBodyExpansion(Loc)) 13698 return true; 13699 Loc = SM.getImmediateMacroCallerLoc(Loc); 13700 } 13701 13702 return false; 13703 } 13704 13705 /// Diagnose pointers that are always non-null. 13706 /// \param E the expression containing the pointer 13707 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 13708 /// compared to a null pointer 13709 /// \param IsEqual True when the comparison is equal to a null pointer 13710 /// \param Range Extra SourceRange to highlight in the diagnostic 13711 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 13712 Expr::NullPointerConstantKind NullKind, 13713 bool IsEqual, SourceRange Range) { 13714 if (!E) 13715 return; 13716 13717 // Don't warn inside macros. 13718 if (E->getExprLoc().isMacroID()) { 13719 const SourceManager &SM = getSourceManager(); 13720 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 13721 IsInAnyMacroBody(SM, Range.getBegin())) 13722 return; 13723 } 13724 E = E->IgnoreImpCasts(); 13725 13726 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 13727 13728 if (isa<CXXThisExpr>(E)) { 13729 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 13730 : diag::warn_this_bool_conversion; 13731 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 13732 return; 13733 } 13734 13735 bool IsAddressOf = false; 13736 13737 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13738 if (UO->getOpcode() != UO_AddrOf) 13739 return; 13740 IsAddressOf = true; 13741 E = UO->getSubExpr(); 13742 } 13743 13744 if (IsAddressOf) { 13745 unsigned DiagID = IsCompare 13746 ? diag::warn_address_of_reference_null_compare 13747 : diag::warn_address_of_reference_bool_conversion; 13748 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13749 << IsEqual; 13750 if (CheckForReference(*this, E, PD)) { 13751 return; 13752 } 13753 } 13754 13755 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13756 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13757 std::string Str; 13758 llvm::raw_string_ostream S(Str); 13759 E->printPretty(S, nullptr, getPrintingPolicy()); 13760 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 13761 : diag::warn_cast_nonnull_to_bool; 13762 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13763 << E->getSourceRange() << Range << IsEqual; 13764 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13765 }; 13766 13767 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13768 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13769 if (auto *Callee = Call->getDirectCallee()) { 13770 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13771 ComplainAboutNonnullParamOrCall(A); 13772 return; 13773 } 13774 } 13775 } 13776 13777 // Expect to find a single Decl. Skip anything more complicated. 13778 ValueDecl *D = nullptr; 13779 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13780 D = R->getDecl(); 13781 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13782 D = M->getMemberDecl(); 13783 } 13784 13785 // Weak Decls can be null. 
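  // (Illustrative: for 'extern int g __attribute__((weak));', the address
  // '&g' may legitimately be null if the weak symbol is never defined, so no
  // "always non-null" warning should be emitted for it.)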
13786 if (!D || D->isWeak()) 13787 return; 13788 13789 // Check for parameter decl with nonnull attribute 13790 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 13791 if (getCurFunction() && 13792 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 13793 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 13794 ComplainAboutNonnullParamOrCall(A); 13795 return; 13796 } 13797 13798 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 13799 // Skip function template not specialized yet. 13800 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 13801 return; 13802 auto ParamIter = llvm::find(FD->parameters(), PV); 13803 assert(ParamIter != FD->param_end()); 13804 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 13805 13806 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 13807 if (!NonNull->args_size()) { 13808 ComplainAboutNonnullParamOrCall(NonNull); 13809 return; 13810 } 13811 13812 for (const ParamIdx &ArgNo : NonNull->args()) { 13813 if (ArgNo.getASTIndex() == ParamNo) { 13814 ComplainAboutNonnullParamOrCall(NonNull); 13815 return; 13816 } 13817 } 13818 } 13819 } 13820 } 13821 } 13822 13823 QualType T = D->getType(); 13824 const bool IsArray = T->isArrayType(); 13825 const bool IsFunction = T->isFunctionType(); 13826 13827 // Address of function is used to silence the function warning. 13828 if (IsAddressOf && IsFunction) { 13829 return; 13830 } 13831 13832 // Found nothing. 13833 if (!IsAddressOf && !IsFunction && !IsArray) 13834 return; 13835 13836 // Pretty print the expression for the diagnostic. 13837 std::string Str; 13838 llvm::raw_string_ostream S(Str); 13839 E->printPretty(S, nullptr, getPrintingPolicy()); 13840 13841 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 13842 : diag::warn_impcast_pointer_to_bool; 13843 enum { 13844 AddressOf, 13845 FunctionPointer, 13846 ArrayPointer 13847 } DiagType; 13848 if (IsAddressOf) 13849 DiagType = AddressOf; 13850 else if (IsFunction) 13851 DiagType = FunctionPointer; 13852 else if (IsArray) 13853 DiagType = ArrayPointer; 13854 else 13855 llvm_unreachable("Could not determine diagnostic."); 13856 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 13857 << Range << IsEqual; 13858 13859 if (!IsFunction) 13860 return; 13861 13862 // Suggest '&' to silence the function warning. 13863 Diag(E->getExprLoc(), diag::note_function_warning_silence) 13864 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 13865 13866 // Check to see if '()' fixit should be emitted. 13867 QualType ReturnType; 13868 UnresolvedSet<4> NonTemplateOverloads; 13869 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 13870 if (ReturnType.isNull()) 13871 return; 13872 13873 if (IsCompare) { 13874 // There are two cases here. If there is null constant, the only suggest 13875 // for a pointer return type. If the null is 0, then suggest if the return 13876 // type is a pointer or an integer type. 13877 if (!ReturnType->isPointerType()) { 13878 if (NullKind == Expr::NPCK_ZeroExpression || 13879 NullKind == Expr::NPCK_ZeroLiteral) { 13880 if (!ReturnType->isIntegerType()) 13881 return; 13882 } else { 13883 return; 13884 } 13885 } 13886 } else { // !IsCompare 13887 // For function to bool, only suggest if the function pointer has bool 13888 // return type. 
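    // (Illustrative: given 'bool ready();', writing 'if (ready)' also gets the
    // '()' fix-it note below; for 'int f();' in a boolean context only the
    // '&f' silencing note above applies.)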
13889 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
13890 return;
13891 }
13892 Diag(E->getExprLoc(), diag::note_function_to_function_call)
13893 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
13894 }
13895
13896 /// Diagnoses "dangerous" implicit conversions within the given
13897 /// expression (which is a full expression). Implements -Wconversion
13898 /// and -Wsign-compare.
13899 ///
13900 /// \param CC the "context" location of the implicit conversion, i.e.
13901 /// the location of the syntactic entity requiring the implicit
13902 /// conversion
13903 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
13904 // Don't diagnose in unevaluated contexts.
13905 if (isUnevaluatedContext())
13906 return;
13907
13908 // Don't diagnose for value- or type-dependent expressions.
13909 if (E->isTypeDependent() || E->isValueDependent())
13910 return;
13911
13912 // Check for array bounds violations in cases where the check isn't triggered
13913 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
13914 // ArraySubscriptExpr is on the RHS of a variable initialization.
13915 CheckArrayAccess(E);
13916
13917 // This is not the right CC for (e.g.) a variable initialization.
13918 AnalyzeImplicitConversions(*this, E, CC);
13919 }
13920
13921 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
13922 /// Input argument E is a logical expression.
13923 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
13924 ::CheckBoolLikeConversion(*this, E, CC);
13925 }
13926
13927 /// Diagnose when an expression is an integer constant expression and its
13928 /// evaluation results in integer overflow.
13929 void Sema::CheckForIntOverflow (Expr *E) {
13930 // Use a work list to deal with nested struct initializers.
13931 SmallVector<Expr *, 2> Exprs(1, E);
13932
13933 do {
13934 Expr *OriginalE = Exprs.pop_back_val();
13935 Expr *E = OriginalE->IgnoreParenCasts();
13936
13937 if (isa<BinaryOperator>(E)) {
13938 E->EvaluateForOverflow(Context);
13939 continue;
13940 }
13941
13942 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
13943 Exprs.append(InitList->inits().begin(), InitList->inits().end());
13944 else if (isa<ObjCBoxedExpr>(OriginalE))
13945 E->EvaluateForOverflow(Context);
13946 else if (auto Call = dyn_cast<CallExpr>(E))
13947 Exprs.append(Call->arg_begin(), Call->arg_end());
13948 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
13949 Exprs.append(Message->arg_begin(), Message->arg_end());
13950 } while (!Exprs.empty());
13951 }
13952
13953 namespace {
13954
13955 /// Visitor for expressions which looks for unsequenced operations on the
13956 /// same object.
13957 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
13958 using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
13959
13960 /// A tree of sequenced regions within an expression. Two regions are
13961 /// unsequenced if one is an ancestor or a descendant of the other. When we
13962 /// finish processing an expression with sequencing, such as a comma
13963 /// expression, we fold its tree nodes into its parent, since they are
13964 /// unsequenced with respect to nodes we will visit later.
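  /// For example (an illustrative sketch): in '(x++, x) + x', the comma
  /// expression gives 'x++' and the middle 'x' their own sequenced sub-regions
  /// so they do not conflict with each other; once the comma expression has
  /// been processed those regions are folded into the parent, which lets the
  /// 'x++' be reported as unsequenced with the trailing '+ x' operand.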
13965 class SequenceTree { 13966 struct Value { 13967 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 13968 unsigned Parent : 31; 13969 unsigned Merged : 1; 13970 }; 13971 SmallVector<Value, 8> Values; 13972 13973 public: 13974 /// A region within an expression which may be sequenced with respect 13975 /// to some other region. 13976 class Seq { 13977 friend class SequenceTree; 13978 13979 unsigned Index; 13980 13981 explicit Seq(unsigned N) : Index(N) {} 13982 13983 public: 13984 Seq() : Index(0) {} 13985 }; 13986 13987 SequenceTree() { Values.push_back(Value(0)); } 13988 Seq root() const { return Seq(0); } 13989 13990 /// Create a new sequence of operations, which is an unsequenced 13991 /// subset of \p Parent. This sequence of operations is sequenced with 13992 /// respect to other children of \p Parent. 13993 Seq allocate(Seq Parent) { 13994 Values.push_back(Value(Parent.Index)); 13995 return Seq(Values.size() - 1); 13996 } 13997 13998 /// Merge a sequence of operations into its parent. 13999 void merge(Seq S) { 14000 Values[S.Index].Merged = true; 14001 } 14002 14003 /// Determine whether two operations are unsequenced. This operation 14004 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14005 /// should have been merged into its parent as appropriate. 14006 bool isUnsequenced(Seq Cur, Seq Old) { 14007 unsigned C = representative(Cur.Index); 14008 unsigned Target = representative(Old.Index); 14009 while (C >= Target) { 14010 if (C == Target) 14011 return true; 14012 C = Values[C].Parent; 14013 } 14014 return false; 14015 } 14016 14017 private: 14018 /// Pick a representative for a sequence. 14019 unsigned representative(unsigned K) { 14020 if (Values[K].Merged) 14021 // Perform path compression as we go. 14022 return Values[K].Parent = representative(Values[K].Parent); 14023 return K; 14024 } 14025 }; 14026 14027 /// An object for which we can track unsequenced uses. 14028 using Object = const NamedDecl *; 14029 14030 /// Different flavors of object usage which we track. We only track the 14031 /// least-sequenced usage of each kind. 14032 enum UsageKind { 14033 /// A read of an object. Multiple unsequenced reads are OK. 14034 UK_Use, 14035 14036 /// A modification of an object which is sequenced before the value 14037 /// computation of the expression, such as ++n in C++. 14038 UK_ModAsValue, 14039 14040 /// A modification of an object which is not sequenced before the value 14041 /// computation of the expression, such as n++. 14042 UK_ModAsSideEffect, 14043 14044 UK_Count = UK_ModAsSideEffect + 1 14045 }; 14046 14047 /// Bundle together a sequencing region and the expression corresponding 14048 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14049 struct Usage { 14050 const Expr *UsageExpr; 14051 SequenceTree::Seq Seq; 14052 14053 Usage() : UsageExpr(nullptr) {} 14054 }; 14055 14056 struct UsageInfo { 14057 Usage Uses[UK_Count]; 14058 14059 /// Have we issued a diagnostic for this object already? 14060 bool Diagnosed; 14061 14062 UsageInfo() : Diagnosed(false) {} 14063 }; 14064 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14065 14066 Sema &SemaRef; 14067 14068 /// Sequenced regions within the expression. 14069 SequenceTree Tree; 14070 14071 /// Declaration modifications and references which we have seen. 14072 UsageInfoMap UsageMap; 14073 14074 /// The region we are currently within. 
14075 SequenceTree::Seq Region; 14076 14077 /// Filled in with declarations which were modified as a side-effect 14078 /// (that is, post-increment operations). 14079 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14080 14081 /// Expressions to check later. We defer checking these to reduce 14082 /// stack usage. 14083 SmallVectorImpl<const Expr *> &WorkList; 14084 14085 /// RAII object wrapping the visitation of a sequenced subexpression of an 14086 /// expression. At the end of this process, the side-effects of the evaluation 14087 /// become sequenced with respect to the value computation of the result, so 14088 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14089 /// UK_ModAsValue. 14090 struct SequencedSubexpression { 14091 SequencedSubexpression(SequenceChecker &Self) 14092 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 14093 Self.ModAsSideEffect = &ModAsSideEffect; 14094 } 14095 14096 ~SequencedSubexpression() { 14097 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 14098 // Add a new usage with usage kind UK_ModAsValue, and then restore 14099 // the previous usage with UK_ModAsSideEffect (thus clearing it if 14100 // the previous one was empty). 14101 UsageInfo &UI = Self.UsageMap[M.first]; 14102 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 14103 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 14104 SideEffectUsage = M.second; 14105 } 14106 Self.ModAsSideEffect = OldModAsSideEffect; 14107 } 14108 14109 SequenceChecker &Self; 14110 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 14111 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 14112 }; 14113 14114 /// RAII object wrapping the visitation of a subexpression which we might 14115 /// choose to evaluate as a constant. If any subexpression is evaluated and 14116 /// found to be non-constant, this allows us to suppress the evaluation of 14117 /// the outer expression. 14118 class EvaluationTracker { 14119 public: 14120 EvaluationTracker(SequenceChecker &Self) 14121 : Self(Self), Prev(Self.EvalTracker) { 14122 Self.EvalTracker = this; 14123 } 14124 14125 ~EvaluationTracker() { 14126 Self.EvalTracker = Prev; 14127 if (Prev) 14128 Prev->EvalOK &= EvalOK; 14129 } 14130 14131 bool evaluate(const Expr *E, bool &Result) { 14132 if (!EvalOK || E->isValueDependent()) 14133 return false; 14134 EvalOK = E->EvaluateAsBooleanCondition( 14135 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 14136 return EvalOK; 14137 } 14138 14139 private: 14140 SequenceChecker &Self; 14141 EvaluationTracker *Prev; 14142 bool EvalOK = true; 14143 } *EvalTracker = nullptr; 14144 14145 /// Find the object which is produced by the specified expression, 14146 /// if any. 14147 Object getObject(const Expr *E, bool Mod) const { 14148 E = E->IgnoreParenCasts(); 14149 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14150 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 14151 return getObject(UO->getSubExpr(), Mod); 14152 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14153 if (BO->getOpcode() == BO_Comma) 14154 return getObject(BO->getRHS(), Mod); 14155 if (Mod && BO->isAssignmentOp()) 14156 return getObject(BO->getLHS(), Mod); 14157 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 14158 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 
14159 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
14160 return ME->getMemberDecl();
14161 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
14162 // FIXME: If this is a reference, map through to its value.
14163 return DRE->getDecl();
14164 return nullptr;
14165 }
14166
14167 /// Note that an object \p O was modified or used by an expression
14168 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
14169 /// the object \p O as obtained via the \p UsageMap.
14170 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
14171 // Get the old usage for the given object and usage kind.
14172 Usage &U = UI.Uses[UK];
14173 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
14174 // If we have a modification as side effect and are in a sequenced
14175 // subexpression, save the old Usage so that we can restore it later
14176 // in SequencedSubexpression::~SequencedSubexpression.
14177 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
14178 ModAsSideEffect->push_back(std::make_pair(O, U));
14179 // Then record the new usage with the current sequencing region.
14180 U.UsageExpr = UsageExpr;
14181 U.Seq = Region;
14182 }
14183 }
14184
14185 /// Check whether a modification or use of an object \p O in an expression
14186 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
14187 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
14188 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
14189 /// usage and false when we are checking for a mod-use unsequenced usage.
14190 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
14191 UsageKind OtherKind, bool IsModMod) {
14192 if (UI.Diagnosed)
14193 return;
14194
14195 const Usage &U = UI.Uses[OtherKind];
14196 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
14197 return;
14198
14199 const Expr *Mod = U.UsageExpr;
14200 const Expr *ModOrUse = UsageExpr;
14201 if (OtherKind == UK_Use)
14202 std::swap(Mod, ModOrUse);
14203
14204 SemaRef.DiagRuntimeBehavior(
14205 Mod->getExprLoc(), {Mod, ModOrUse},
14206 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
14207 : diag::warn_unsequenced_mod_use)
14208 << O << SourceRange(ModOrUse->getExprLoc()));
14209 UI.Diagnosed = true;
14210 }
14211
14212 // A note on note{Pre, Post}{Use, Mod}:
14213 //
14214 // (It helps to follow the algorithm with an expression such as
14215 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
14216 // operations before C++17 and both are well-defined in C++17).
14217 //
14218 // When visiting a node which uses/modifies an object, we first call notePreUse
14219 // or notePreMod before visiting its sub-expression(s). At this point the
14220 // children of the current node have not yet been visited and so the eventual
14221 // uses/modifications resulting from the children of the current node have not
14222 // been recorded yet.
14223 //
14224 // We then visit the children of the current node. After that notePostUse or
14225 // notePostMod is called. These will 1) detect an unsequenced modification
14226 // as side effect (as in "k++ + k") and 2) add a new usage with the
14227 // appropriate usage kind.
14228 //
14229 // We also have to be careful because some operators sequence modifications as
14230 // side effects as well (for example: || or ,). To account for this we wrap
14231 // the visitation of such a sub-expression (for example: the LHS of || or ,)
14232 // with SequencedSubexpression. 
SequencedSubexpression is an RAII object 14233 // which record usages which are modifications as side effect, and then 14234 // downgrade them (or more accurately restore the previous usage which was a 14235 // modification as side effect) when exiting the scope of the sequenced 14236 // subexpression. 14237 14238 void notePreUse(Object O, const Expr *UseExpr) { 14239 UsageInfo &UI = UsageMap[O]; 14240 // Uses conflict with other modifications. 14241 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14242 } 14243 14244 void notePostUse(Object O, const Expr *UseExpr) { 14245 UsageInfo &UI = UsageMap[O]; 14246 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14247 /*IsModMod=*/false); 14248 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14249 } 14250 14251 void notePreMod(Object O, const Expr *ModExpr) { 14252 UsageInfo &UI = UsageMap[O]; 14253 // Modifications conflict with other modifications and with uses. 14254 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14255 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14256 } 14257 14258 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14259 UsageInfo &UI = UsageMap[O]; 14260 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14261 /*IsModMod=*/true); 14262 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14263 } 14264 14265 public: 14266 SequenceChecker(Sema &S, const Expr *E, 14267 SmallVectorImpl<const Expr *> &WorkList) 14268 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14269 Visit(E); 14270 // Silence a -Wunused-private-field since WorkList is now unused. 14271 // TODO: Evaluate if it can be used, and if not remove it. 14272 (void)this->WorkList; 14273 } 14274 14275 void VisitStmt(const Stmt *S) { 14276 // Skip all statements which aren't expressions for now. 14277 } 14278 14279 void VisitExpr(const Expr *E) { 14280 // By default, just recurse to evaluated subexpressions. 14281 Base::VisitStmt(E); 14282 } 14283 14284 void VisitCastExpr(const CastExpr *E) { 14285 Object O = Object(); 14286 if (E->getCastKind() == CK_LValueToRValue) 14287 O = getObject(E->getSubExpr(), false); 14288 14289 if (O) 14290 notePreUse(O, E); 14291 VisitExpr(E); 14292 if (O) 14293 notePostUse(O, E); 14294 } 14295 14296 void VisitSequencedExpressions(const Expr *SequencedBefore, 14297 const Expr *SequencedAfter) { 14298 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14299 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14300 SequenceTree::Seq OldRegion = Region; 14301 14302 { 14303 SequencedSubexpression SeqBefore(*this); 14304 Region = BeforeRegion; 14305 Visit(SequencedBefore); 14306 } 14307 14308 Region = AfterRegion; 14309 Visit(SequencedAfter); 14310 14311 Region = OldRegion; 14312 14313 Tree.merge(BeforeRegion); 14314 Tree.merge(AfterRegion); 14315 } 14316 14317 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14318 // C++17 [expr.sub]p1: 14319 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14320 // expression E1 is sequenced before the expression E2. 
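    // (Illustrative: given 'int m[2][2]; int i = 0;', the expression
    // 'm[i++][i]' is not flagged in C++17 because 'm[i++]' (E1) is sequenced
    // before the outer index 'i' (E2); before C++17 no such sequencing exists
    // and the mod/use pair can be diagnosed.)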
14321 if (SemaRef.getLangOpts().CPlusPlus17) 14322 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14323 else { 14324 Visit(ASE->getLHS()); 14325 Visit(ASE->getRHS()); 14326 } 14327 } 14328 14329 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14330 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14331 void VisitBinPtrMem(const BinaryOperator *BO) { 14332 // C++17 [expr.mptr.oper]p4: 14333 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14334 // the expression E1 is sequenced before the expression E2. 14335 if (SemaRef.getLangOpts().CPlusPlus17) 14336 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14337 else { 14338 Visit(BO->getLHS()); 14339 Visit(BO->getRHS()); 14340 } 14341 } 14342 14343 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14344 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14345 void VisitBinShlShr(const BinaryOperator *BO) { 14346 // C++17 [expr.shift]p4: 14347 // The expression E1 is sequenced before the expression E2. 14348 if (SemaRef.getLangOpts().CPlusPlus17) 14349 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14350 else { 14351 Visit(BO->getLHS()); 14352 Visit(BO->getRHS()); 14353 } 14354 } 14355 14356 void VisitBinComma(const BinaryOperator *BO) { 14357 // C++11 [expr.comma]p1: 14358 // Every value computation and side effect associated with the left 14359 // expression is sequenced before every value computation and side 14360 // effect associated with the right expression. 14361 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14362 } 14363 14364 void VisitBinAssign(const BinaryOperator *BO) { 14365 SequenceTree::Seq RHSRegion; 14366 SequenceTree::Seq LHSRegion; 14367 if (SemaRef.getLangOpts().CPlusPlus17) { 14368 RHSRegion = Tree.allocate(Region); 14369 LHSRegion = Tree.allocate(Region); 14370 } else { 14371 RHSRegion = Region; 14372 LHSRegion = Region; 14373 } 14374 SequenceTree::Seq OldRegion = Region; 14375 14376 // C++11 [expr.ass]p1: 14377 // [...] the assignment is sequenced after the value computation 14378 // of the right and left operands, [...] 14379 // 14380 // so check it before inspecting the operands and update the 14381 // map afterwards. 14382 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14383 if (O) 14384 notePreMod(O, BO); 14385 14386 if (SemaRef.getLangOpts().CPlusPlus17) { 14387 // C++17 [expr.ass]p1: 14388 // [...] The right operand is sequenced before the left operand. [...] 14389 { 14390 SequencedSubexpression SeqBefore(*this); 14391 Region = RHSRegion; 14392 Visit(BO->getRHS()); 14393 } 14394 14395 Region = LHSRegion; 14396 Visit(BO->getLHS()); 14397 14398 if (O && isa<CompoundAssignOperator>(BO)) 14399 notePostUse(O, BO); 14400 14401 } else { 14402 // C++11 does not specify any sequencing between the LHS and RHS. 14403 Region = LHSRegion; 14404 Visit(BO->getLHS()); 14405 14406 if (O && isa<CompoundAssignOperator>(BO)) 14407 notePostUse(O, BO); 14408 14409 Region = RHSRegion; 14410 Visit(BO->getRHS()); 14411 } 14412 14413 // C++11 [expr.ass]p1: 14414 // the assignment is sequenced [...] before the value computation of the 14415 // assignment expression. 14416 // C11 6.5.16/3 has no such rule. 14417 Region = OldRegion; 14418 if (O) 14419 notePostMod(O, BO, 14420 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14421 : UK_ModAsSideEffect); 14422 if (SemaRef.getLangOpts().CPlusPlus17) { 14423 Tree.merge(RHSRegion); 14424 Tree.merge(LHSRegion); 14425 } 14426 } 14427 14428 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14429 VisitBinAssign(CAO); 14430 } 14431 14432 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14433 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14434 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14435 Object O = getObject(UO->getSubExpr(), true); 14436 if (!O) 14437 return VisitExpr(UO); 14438 14439 notePreMod(O, UO); 14440 Visit(UO->getSubExpr()); 14441 // C++11 [expr.pre.incr]p1: 14442 // the expression ++x is equivalent to x+=1 14443 notePostMod(O, UO, 14444 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14445 : UK_ModAsSideEffect); 14446 } 14447 14448 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14449 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14450 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14451 Object O = getObject(UO->getSubExpr(), true); 14452 if (!O) 14453 return VisitExpr(UO); 14454 14455 notePreMod(O, UO); 14456 Visit(UO->getSubExpr()); 14457 notePostMod(O, UO, UK_ModAsSideEffect); 14458 } 14459 14460 void VisitBinLOr(const BinaryOperator *BO) { 14461 // C++11 [expr.log.or]p2: 14462 // If the second expression is evaluated, every value computation and 14463 // side effect associated with the first expression is sequenced before 14464 // every value computation and side effect associated with the 14465 // second expression. 14466 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14467 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14468 SequenceTree::Seq OldRegion = Region; 14469 14470 EvaluationTracker Eval(*this); 14471 { 14472 SequencedSubexpression Sequenced(*this); 14473 Region = LHSRegion; 14474 Visit(BO->getLHS()); 14475 } 14476 14477 // C++11 [expr.log.or]p1: 14478 // [...] the second operand is not evaluated if the first operand 14479 // evaluates to true. 14480 bool EvalResult = false; 14481 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14482 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14483 if (ShouldVisitRHS) { 14484 Region = RHSRegion; 14485 Visit(BO->getRHS()); 14486 } 14487 14488 Region = OldRegion; 14489 Tree.merge(LHSRegion); 14490 Tree.merge(RHSRegion); 14491 } 14492 14493 void VisitBinLAnd(const BinaryOperator *BO) { 14494 // C++11 [expr.log.and]p2: 14495 // If the second expression is evaluated, every value computation and 14496 // side effect associated with the first expression is sequenced before 14497 // every value computation and side effect associated with the 14498 // second expression. 14499 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14500 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14501 SequenceTree::Seq OldRegion = Region; 14502 14503 EvaluationTracker Eval(*this); 14504 { 14505 SequencedSubexpression Sequenced(*this); 14506 Region = LHSRegion; 14507 Visit(BO->getLHS()); 14508 } 14509 14510 // C++11 [expr.log.and]p1: 14511 // [...] the second operand is not evaluated if the first operand is false. 
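    // (Illustrative sketch: if the LHS folds to a constant 'false', as in
    // 'false && (i++ + i++)', the RHS is never evaluated at run time, so it is
    // not visited below and no -Wunsequenced warning is issued for it.)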
14512 bool EvalResult = false; 14513 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14514 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult); 14515 if (ShouldVisitRHS) { 14516 Region = RHSRegion; 14517 Visit(BO->getRHS()); 14518 } 14519 14520 Region = OldRegion; 14521 Tree.merge(LHSRegion); 14522 Tree.merge(RHSRegion); 14523 } 14524 14525 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) { 14526 // C++11 [expr.cond]p1: 14527 // [...] Every value computation and side effect associated with the first 14528 // expression is sequenced before every value computation and side effect 14529 // associated with the second or third expression. 14530 SequenceTree::Seq ConditionRegion = Tree.allocate(Region); 14531 14532 // No sequencing is specified between the true and false expression. 14533 // However since exactly one of both is going to be evaluated we can 14534 // consider them to be sequenced. This is needed to avoid warning on 14535 // something like "x ? y+= 1 : y += 2;" in the case where we will visit 14536 // both the true and false expressions because we can't evaluate x. 14537 // This will still allow us to detect an expression like (pre C++17) 14538 // "(x ? y += 1 : y += 2) = y". 14539 // 14540 // We don't wrap the visitation of the true and false expression with 14541 // SequencedSubexpression because we don't want to downgrade modifications 14542 // as side effect in the true and false expressions after the visition 14543 // is done. (for example in the expression "(x ? y++ : y++) + y" we should 14544 // not warn between the two "y++", but we should warn between the "y++" 14545 // and the "y". 14546 SequenceTree::Seq TrueRegion = Tree.allocate(Region); 14547 SequenceTree::Seq FalseRegion = Tree.allocate(Region); 14548 SequenceTree::Seq OldRegion = Region; 14549 14550 EvaluationTracker Eval(*this); 14551 { 14552 SequencedSubexpression Sequenced(*this); 14553 Region = ConditionRegion; 14554 Visit(CO->getCond()); 14555 } 14556 14557 // C++11 [expr.cond]p1: 14558 // [...] The first expression is contextually converted to bool (Clause 4). 14559 // It is evaluated and if it is true, the result of the conditional 14560 // expression is the value of the second expression, otherwise that of the 14561 // third expression. Only one of the second and third expressions is 14562 // evaluated. [...] 14563 bool EvalResult = false; 14564 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult); 14565 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult); 14566 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult); 14567 if (ShouldVisitTrueExpr) { 14568 Region = TrueRegion; 14569 Visit(CO->getTrueExpr()); 14570 } 14571 if (ShouldVisitFalseExpr) { 14572 Region = FalseRegion; 14573 Visit(CO->getFalseExpr()); 14574 } 14575 14576 Region = OldRegion; 14577 Tree.merge(ConditionRegion); 14578 Tree.merge(TrueRegion); 14579 Tree.merge(FalseRegion); 14580 } 14581 14582 void VisitCallExpr(const CallExpr *CE) { 14583 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions. 14584 14585 if (CE->isUnevaluatedBuiltinCall(Context)) 14586 return; 14587 14588 // C++11 [intro.execution]p15: 14589 // When calling a function [...], every value computation and side effect 14590 // associated with any argument expression, or with the postfix expression 14591 // designating the called function, is sequenced before execution of every 14592 // expression or statement in the body of the function [and thus before 14593 // the value computation of its result]. 
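    // (Illustrative note: this is why the visitation of the call is wrapped in
    // a SequencedSubexpression below; a side-effecting argument such as the
    // 'i++' in 'n = f(i++)' ends up recorded as a modification sequenced
    // before the call's value computation (UK_ModAsValue) rather than as a
    // bare UK_ModAsSideEffect.)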
14594 SequencedSubexpression Sequenced(*this); 14595 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 14596 // C++17 [expr.call]p5 14597 // The postfix-expression is sequenced before each expression in the 14598 // expression-list and any default argument. [...] 14599 SequenceTree::Seq CalleeRegion; 14600 SequenceTree::Seq OtherRegion; 14601 if (SemaRef.getLangOpts().CPlusPlus17) { 14602 CalleeRegion = Tree.allocate(Region); 14603 OtherRegion = Tree.allocate(Region); 14604 } else { 14605 CalleeRegion = Region; 14606 OtherRegion = Region; 14607 } 14608 SequenceTree::Seq OldRegion = Region; 14609 14610 // Visit the callee expression first. 14611 Region = CalleeRegion; 14612 if (SemaRef.getLangOpts().CPlusPlus17) { 14613 SequencedSubexpression Sequenced(*this); 14614 Visit(CE->getCallee()); 14615 } else { 14616 Visit(CE->getCallee()); 14617 } 14618 14619 // Then visit the argument expressions. 14620 Region = OtherRegion; 14621 for (const Expr *Argument : CE->arguments()) 14622 Visit(Argument); 14623 14624 Region = OldRegion; 14625 if (SemaRef.getLangOpts().CPlusPlus17) { 14626 Tree.merge(CalleeRegion); 14627 Tree.merge(OtherRegion); 14628 } 14629 }); 14630 } 14631 14632 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 14633 // C++17 [over.match.oper]p2: 14634 // [...] the operator notation is first transformed to the equivalent 14635 // function-call notation as summarized in Table 12 (where @ denotes one 14636 // of the operators covered in the specified subclause). However, the 14637 // operands are sequenced in the order prescribed for the built-in 14638 // operator (Clause 8). 14639 // 14640 // From the above only overloaded binary operators and overloaded call 14641 // operators have sequencing rules in C++17 that we need to handle 14642 // separately. 14643 if (!SemaRef.getLangOpts().CPlusPlus17 || 14644 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 14645 return VisitCallExpr(CXXOCE); 14646 14647 enum { 14648 NoSequencing, 14649 LHSBeforeRHS, 14650 RHSBeforeLHS, 14651 LHSBeforeRest 14652 } SequencingKind; 14653 switch (CXXOCE->getOperator()) { 14654 case OO_Equal: 14655 case OO_PlusEqual: 14656 case OO_MinusEqual: 14657 case OO_StarEqual: 14658 case OO_SlashEqual: 14659 case OO_PercentEqual: 14660 case OO_CaretEqual: 14661 case OO_AmpEqual: 14662 case OO_PipeEqual: 14663 case OO_LessLessEqual: 14664 case OO_GreaterGreaterEqual: 14665 SequencingKind = RHSBeforeLHS; 14666 break; 14667 14668 case OO_LessLess: 14669 case OO_GreaterGreater: 14670 case OO_AmpAmp: 14671 case OO_PipePipe: 14672 case OO_Comma: 14673 case OO_ArrowStar: 14674 case OO_Subscript: 14675 SequencingKind = LHSBeforeRHS; 14676 break; 14677 14678 case OO_Call: 14679 SequencingKind = LHSBeforeRest; 14680 break; 14681 14682 default: 14683 SequencingKind = NoSequencing; 14684 break; 14685 } 14686 14687 if (SequencingKind == NoSequencing) 14688 return VisitCallExpr(CXXOCE); 14689 14690 // This is a call, so all subexpressions are sequenced before the result. 
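    // (Illustrative: with a user-defined 'S &operator<<(S &, int)', the
    // expression 's << i++ << i' follows the built-in '<<' sequencing in
    // C++17, LHS before RHS, and is therefore handed to
    // VisitSequencedExpressions below rather than treated as an ordinary
    // call.)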
14691 SequencedSubexpression Sequenced(*this); 14692 14693 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 14694 assert(SemaRef.getLangOpts().CPlusPlus17 && 14695 "Should only get there with C++17 and above!"); 14696 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 14697 "Should only get there with an overloaded binary operator" 14698 " or an overloaded call operator!"); 14699 14700 if (SequencingKind == LHSBeforeRest) { 14701 assert(CXXOCE->getOperator() == OO_Call && 14702 "We should only have an overloaded call operator here!"); 14703 14704 // This is very similar to VisitCallExpr, except that we only have the 14705 // C++17 case. The postfix-expression is the first argument of the 14706 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 14707 // are in the following arguments. 14708 // 14709 // Note that we intentionally do not visit the callee expression since 14710 // it is just a decayed reference to a function. 14711 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 14712 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 14713 SequenceTree::Seq OldRegion = Region; 14714 14715 assert(CXXOCE->getNumArgs() >= 1 && 14716 "An overloaded call operator must have at least one argument" 14717 " for the postfix-expression!"); 14718 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 14719 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 14720 CXXOCE->getNumArgs() - 1); 14721 14722 // Visit the postfix-expression first. 14723 { 14724 Region = PostfixExprRegion; 14725 SequencedSubexpression Sequenced(*this); 14726 Visit(PostfixExpr); 14727 } 14728 14729 // Then visit the argument expressions. 14730 Region = ArgsRegion; 14731 for (const Expr *Arg : Args) 14732 Visit(Arg); 14733 14734 Region = OldRegion; 14735 Tree.merge(PostfixExprRegion); 14736 Tree.merge(ArgsRegion); 14737 } else { 14738 assert(CXXOCE->getNumArgs() == 2 && 14739 "Should only have two arguments here!"); 14740 assert((SequencingKind == LHSBeforeRHS || 14741 SequencingKind == RHSBeforeLHS) && 14742 "Unexpected sequencing kind!"); 14743 14744 // We do not visit the callee expression since it is just a decayed 14745 // reference to a function. 14746 const Expr *E1 = CXXOCE->getArg(0); 14747 const Expr *E2 = CXXOCE->getArg(1); 14748 if (SequencingKind == RHSBeforeLHS) 14749 std::swap(E1, E2); 14750 14751 return VisitSequencedExpressions(E1, E2); 14752 } 14753 }); 14754 } 14755 14756 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14757 // This is a call, so all subexpressions are sequenced before the result. 14758 SequencedSubexpression Sequenced(*this); 14759 14760 if (!CCE->isListInitialization()) 14761 return VisitExpr(CCE); 14762 14763 // In C++11, list initializations are sequenced. 14764 SmallVector<SequenceTree::Seq, 32> Elts; 14765 SequenceTree::Seq Parent = Region; 14766 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14767 E = CCE->arg_end(); 14768 I != E; ++I) { 14769 Region = Tree.allocate(Parent); 14770 Elts.push_back(Region); 14771 Visit(*I); 14772 } 14773 14774 // Forget that the initializers are sequenced. 14775 Region = Parent; 14776 for (unsigned I = 0; I < Elts.size(); ++I) 14777 Tree.merge(Elts[I]); 14778 } 14779 14780 void VisitInitListExpr(const InitListExpr *ILE) { 14781 if (!SemaRef.getLangOpts().CPlusPlus11) 14782 return VisitExpr(ILE); 14783 14784 // In C++11, list initializations are sequenced. 
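    // (Illustrative: 'int a[2] = {i++, i++};' is not flagged, since each
    // braced initializer is sequenced before the next one.)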
14785 SmallVector<SequenceTree::Seq, 32> Elts; 14786 SequenceTree::Seq Parent = Region; 14787 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14788 const Expr *E = ILE->getInit(I); 14789 if (!E) 14790 continue; 14791 Region = Tree.allocate(Parent); 14792 Elts.push_back(Region); 14793 Visit(E); 14794 } 14795 14796 // Forget that the initializers are sequenced. 14797 Region = Parent; 14798 for (unsigned I = 0; I < Elts.size(); ++I) 14799 Tree.merge(Elts[I]); 14800 } 14801 }; 14802 14803 } // namespace 14804 14805 void Sema::CheckUnsequencedOperations(const Expr *E) { 14806 SmallVector<const Expr *, 8> WorkList; 14807 WorkList.push_back(E); 14808 while (!WorkList.empty()) { 14809 const Expr *Item = WorkList.pop_back_val(); 14810 SequenceChecker(*this, Item, WorkList); 14811 } 14812 } 14813 14814 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14815 bool IsConstexpr) { 14816 llvm::SaveAndRestore<bool> ConstantContext( 14817 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14818 CheckImplicitConversions(E, CheckLoc); 14819 if (!E->isInstantiationDependent()) 14820 CheckUnsequencedOperations(E); 14821 if (!IsConstexpr && !E->isValueDependent()) 14822 CheckForIntOverflow(E); 14823 DiagnoseMisalignedMembers(); 14824 } 14825 14826 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14827 FieldDecl *BitField, 14828 Expr *Init) { 14829 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14830 } 14831 14832 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14833 SourceLocation Loc) { 14834 if (!PType->isVariablyModifiedType()) 14835 return; 14836 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14837 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14838 return; 14839 } 14840 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14841 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14842 return; 14843 } 14844 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14845 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14846 return; 14847 } 14848 14849 const ArrayType *AT = S.Context.getAsArrayType(PType); 14850 if (!AT) 14851 return; 14852 14853 if (AT->getSizeModifier() != ArrayType::Star) { 14854 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 14855 return; 14856 } 14857 14858 S.Diag(Loc, diag::err_array_star_in_function_definition); 14859 } 14860 14861 /// CheckParmsForFunctionDef - Check that the parameters of the given 14862 /// function are appropriate for the definition of a function. This 14863 /// takes care of any checks that cannot be performed on the 14864 /// declaration itself, e.g., that the types of each of the function 14865 /// parameters are complete. 14866 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14867 bool CheckParameterNames) { 14868 bool HasInvalidParm = false; 14869 for (ParmVarDecl *Param : Parameters) { 14870 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14871 // function declarator that is part of a function definition of 14872 // that function shall not have incomplete type. 14873 // 14874 // This is also C++ [dcl.fct]p6. 
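    // (Illustrative: 'struct S; void f(struct S s) {}' is rejected here
    // because a parameter's type must be complete at the function definition.)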
14875 if (!Param->isInvalidDecl() && 14876 RequireCompleteType(Param->getLocation(), Param->getType(), 14877 diag::err_typecheck_decl_incomplete_type)) { 14878 Param->setInvalidDecl(); 14879 HasInvalidParm = true; 14880 } 14881 14882 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14883 // declaration of each parameter shall include an identifier. 14884 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14885 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14886 // Diagnose this as an extension in C17 and earlier. 14887 if (!getLangOpts().C2x) 14888 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14889 } 14890 14891 // C99 6.7.5.3p12: 14892 // If the function declarator is not part of a definition of that 14893 // function, parameters may have incomplete type and may use the [*] 14894 // notation in their sequences of declarator specifiers to specify 14895 // variable length array types. 14896 QualType PType = Param->getOriginalType(); 14897 // FIXME: This diagnostic should point the '[*]' if source-location 14898 // information is added for it. 14899 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14900 14901 // If the parameter is a c++ class type and it has to be destructed in the 14902 // callee function, declare the destructor so that it can be called by the 14903 // callee function. Do not perform any direct access check on the dtor here. 14904 if (!Param->isInvalidDecl()) { 14905 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14906 if (!ClassDecl->isInvalidDecl() && 14907 !ClassDecl->hasIrrelevantDestructor() && 14908 !ClassDecl->isDependentContext() && 14909 ClassDecl->isParamDestroyedInCallee()) { 14910 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14911 MarkFunctionReferenced(Param->getLocation(), Destructor); 14912 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14913 } 14914 } 14915 } 14916 14917 // Parameters with the pass_object_size attribute only need to be marked 14918 // constant at function definitions. Because we lack information about 14919 // whether we're on a declaration or definition when we're instantiating the 14920 // attribute, we need to check for constness here. 14921 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14922 if (!Param->getType().isConstQualified()) 14923 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14924 << Attr->getSpelling() << 1; 14925 14926 // Check for parameter names shadowing fields from the class. 14927 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14928 // The owning context for the parameter should be the function, but we 14929 // want to see if this function's declaration context is a record. 14930 DeclContext *DC = Param->getDeclContext(); 14931 if (DC && DC->isFunctionOrMethod()) { 14932 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14933 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14934 RD, /*DeclIsField*/ false); 14935 } 14936 } 14937 } 14938 14939 return HasInvalidParm; 14940 } 14941 14942 Optional<std::pair<CharUnits, CharUnits>> 14943 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14944 14945 /// Compute the alignment and offset of the base class object given the 14946 /// derived-to-base cast expression and the alignment and offset of the derived 14947 /// class object. 
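/// For example (an illustrative sketch): with 'struct A { int a; };
/// struct B { int b; }; struct D : A, B {};', converting a 'D *' known to be
/// 16-byte aligned at offset 0 into a 'B *' keeps the 16-byte alignment and
/// adds B's base-class offset within D; for a virtual base the alignment is
/// instead clamped to the base's non-virtual alignment and the offset resets
/// to zero.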
14948 static std::pair<CharUnits, CharUnits> 14949 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14950 CharUnits BaseAlignment, CharUnits Offset, 14951 ASTContext &Ctx) { 14952 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14953 ++PathI) { 14954 const CXXBaseSpecifier *Base = *PathI; 14955 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14956 if (Base->isVirtual()) { 14957 // The complete object may have a lower alignment than the non-virtual 14958 // alignment of the base, in which case the base may be misaligned. Choose 14959 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14960 // conservative lower bound of the complete object alignment. 14961 CharUnits NonVirtualAlignment = 14962 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14963 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14964 Offset = CharUnits::Zero(); 14965 } else { 14966 const ASTRecordLayout &RL = 14967 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14968 Offset += RL.getBaseClassOffset(BaseDecl); 14969 } 14970 DerivedType = Base->getType(); 14971 } 14972 14973 return std::make_pair(BaseAlignment, Offset); 14974 } 14975 14976 /// Compute the alignment and offset of a binary additive operator. 14977 static Optional<std::pair<CharUnits, CharUnits>> 14978 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 14979 bool IsSub, ASTContext &Ctx) { 14980 QualType PointeeType = PtrE->getType()->getPointeeType(); 14981 14982 if (!PointeeType->isConstantSizeType()) 14983 return llvm::None; 14984 14985 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 14986 14987 if (!P) 14988 return llvm::None; 14989 14990 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 14991 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 14992 CharUnits Offset = EltSize * IdxRes->getExtValue(); 14993 if (IsSub) 14994 Offset = -Offset; 14995 return std::make_pair(P->first, P->second + Offset); 14996 } 14997 14998 // If the integer expression isn't a constant expression, compute the lower 14999 // bound of the alignment using the alignment and offset of the pointer 15000 // expression and the element size. 15001 return std::make_pair( 15002 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15003 CharUnits::Zero()); 15004 } 15005 15006 /// This helper function takes an lvalue expression and returns the alignment of 15007 /// a VarDecl and a constant offset from the VarDecl. 
15008 Optional<std::pair<CharUnits, CharUnits>> 15009 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15010 E = E->IgnoreParens(); 15011 switch (E->getStmtClass()) { 15012 default: 15013 break; 15014 case Stmt::CStyleCastExprClass: 15015 case Stmt::CXXStaticCastExprClass: 15016 case Stmt::ImplicitCastExprClass: { 15017 auto *CE = cast<CastExpr>(E); 15018 const Expr *From = CE->getSubExpr(); 15019 switch (CE->getCastKind()) { 15020 default: 15021 break; 15022 case CK_NoOp: 15023 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15024 case CK_UncheckedDerivedToBase: 15025 case CK_DerivedToBase: { 15026 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15027 if (!P) 15028 break; 15029 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15030 P->second, Ctx); 15031 } 15032 } 15033 break; 15034 } 15035 case Stmt::ArraySubscriptExprClass: { 15036 auto *ASE = cast<ArraySubscriptExpr>(E); 15037 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15038 false, Ctx); 15039 } 15040 case Stmt::DeclRefExprClass: { 15041 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15042 // FIXME: If VD is captured by copy or is an escaping __block variable, 15043 // use the alignment of VD's type. 15044 if (!VD->getType()->isReferenceType()) 15045 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15046 if (VD->hasInit()) 15047 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15048 } 15049 break; 15050 } 15051 case Stmt::MemberExprClass: { 15052 auto *ME = cast<MemberExpr>(E); 15053 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15054 if (!FD || FD->getType()->isReferenceType() || 15055 FD->getParent()->isInvalidDecl()) 15056 break; 15057 Optional<std::pair<CharUnits, CharUnits>> P; 15058 if (ME->isArrow()) 15059 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15060 else 15061 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15062 if (!P) 15063 break; 15064 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15065 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15066 return std::make_pair(P->first, 15067 P->second + CharUnits::fromQuantity(Offset)); 15068 } 15069 case Stmt::UnaryOperatorClass: { 15070 auto *UO = cast<UnaryOperator>(E); 15071 switch (UO->getOpcode()) { 15072 default: 15073 break; 15074 case UO_Deref: 15075 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15076 } 15077 break; 15078 } 15079 case Stmt::BinaryOperatorClass: { 15080 auto *BO = cast<BinaryOperator>(E); 15081 auto Opcode = BO->getOpcode(); 15082 switch (Opcode) { 15083 default: 15084 break; 15085 case BO_Comma: 15086 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15087 } 15088 break; 15089 } 15090 } 15091 return llvm::None; 15092 } 15093 15094 /// This helper function takes a pointer expression and returns the alignment of 15095 /// a VarDecl and a constant offset from the VarDecl. 
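/// For example (illustrative): for '&v + 1' this yields the declared alignment
/// of the VarDecl 'v' together with an offset of sizeof(v); if the added index
/// is not a constant, getAlignmentAndOffsetFromBinAddOrSub above falls back to
/// a conservative alignment lower bound with an offset of zero.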
15096 Optional<std::pair<CharUnits, CharUnits>> 15097 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 15098 E = E->IgnoreParens(); 15099 switch (E->getStmtClass()) { 15100 default: 15101 break; 15102 case Stmt::CStyleCastExprClass: 15103 case Stmt::CXXStaticCastExprClass: 15104 case Stmt::ImplicitCastExprClass: { 15105 auto *CE = cast<CastExpr>(E); 15106 const Expr *From = CE->getSubExpr(); 15107 switch (CE->getCastKind()) { 15108 default: 15109 break; 15110 case CK_NoOp: 15111 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15112 case CK_ArrayToPointerDecay: 15113 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15114 case CK_UncheckedDerivedToBase: 15115 case CK_DerivedToBase: { 15116 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15117 if (!P) 15118 break; 15119 return getDerivedToBaseAlignmentAndOffset( 15120 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 15121 } 15122 } 15123 break; 15124 } 15125 case Stmt::CXXThisExprClass: { 15126 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 15127 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 15128 return std::make_pair(Alignment, CharUnits::Zero()); 15129 } 15130 case Stmt::UnaryOperatorClass: { 15131 auto *UO = cast<UnaryOperator>(E); 15132 if (UO->getOpcode() == UO_AddrOf) 15133 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 15134 break; 15135 } 15136 case Stmt::BinaryOperatorClass: { 15137 auto *BO = cast<BinaryOperator>(E); 15138 auto Opcode = BO->getOpcode(); 15139 switch (Opcode) { 15140 default: 15141 break; 15142 case BO_Add: 15143 case BO_Sub: { 15144 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 15145 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 15146 std::swap(LHS, RHS); 15147 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 15148 Ctx); 15149 } 15150 case BO_Comma: 15151 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 15152 } 15153 break; 15154 } 15155 } 15156 return llvm::None; 15157 } 15158 15159 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 15160 // See if we can compute the alignment of a VarDecl and an offset from it. 15161 Optional<std::pair<CharUnits, CharUnits>> P = 15162 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 15163 15164 if (P) 15165 return P->first.alignmentAtOffset(P->second); 15166 15167 // If that failed, return the type's alignment. 15168 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 15169 } 15170 15171 /// CheckCastAlign - Implements -Wcast-align, which warns when a 15172 /// pointer cast increases the alignment requirements. 15173 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 15174 // This is actually a lot of work to potentially be doing on every 15175 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 15176 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 15177 return; 15178 15179 // Ignore dependent types. 15180 if (T->isDependentType() || Op->getType()->isDependentType()) 15181 return; 15182 15183 // Require that the destination be a pointer type. 15184 const PointerType *DestPtr = T->getAs<PointerType>(); 15185 if (!DestPtr) return; 15186 15187 // If the destination has alignment 1, we're done. 
15188 QualType DestPointee = DestPtr->getPointeeType(); 15189 if (DestPointee->isIncompleteType()) return; 15190 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 15191 if (DestAlign.isOne()) return; 15192 15193 // Require that the source be a pointer type. 15194 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15195 if (!SrcPtr) return; 15196 QualType SrcPointee = SrcPtr->getPointeeType(); 15197 15198 // Explicitly allow casts from cv void*. We already implicitly 15199 // allowed casts to cv void*, since they have alignment 1. 15200 // Also allow casts involving incomplete types, which implicitly 15201 // includes 'void'. 15202 if (SrcPointee->isIncompleteType()) return; 15203 15204 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15205 15206 if (SrcAlign >= DestAlign) return; 15207 15208 Diag(TRange.getBegin(), diag::warn_cast_align) 15209 << Op->getType() << T 15210 << static_cast<unsigned>(SrcAlign.getQuantity()) 15211 << static_cast<unsigned>(DestAlign.getQuantity()) 15212 << TRange << Op->getSourceRange(); 15213 } 15214 15215 /// Check whether this array fits the idiom of a size-one tail padded 15216 /// array member of a struct. 15217 /// 15218 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15219 /// commonly used to emulate flexible arrays in C89 code. 15220 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15221 const NamedDecl *ND) { 15222 if (Size != 1 || !ND) return false; 15223 15224 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15225 if (!FD) return false; 15226 15227 // Don't consider sizes resulting from macro expansions or template argument 15228 // substitution to form C89 tail-padded arrays. 15229 15230 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15231 while (TInfo) { 15232 TypeLoc TL = TInfo->getTypeLoc(); 15233 // Look through typedefs. 15234 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15235 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15236 TInfo = TDL->getTypeSourceInfo(); 15237 continue; 15238 } 15239 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15240 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15241 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15242 return false; 15243 } 15244 break; 15245 } 15246 15247 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15248 if (!RD) return false; 15249 if (RD->isUnion()) return false; 15250 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15251 if (!CRD->isStandardLayout()) return false; 15252 } 15253 15254 // See if this is the last field decl in the record. 15255 const Decl *D = FD; 15256 while ((D = D->getNextDeclInContext())) 15257 if (isa<FieldDecl>(D)) 15258 return false; 15259 return true; 15260 } 15261 15262 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15263 const ArraySubscriptExpr *ASE, 15264 bool AllowOnePastEnd, bool IndexNegated) { 15265 // Already diagnosed by the constant evaluator. 15266 if (isConstantEvaluated()) 15267 return; 15268 15269 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15270 if (IndexExpr->isValueDependent()) 15271 return; 15272 15273 const Type *EffectiveType = 15274 BaseExpr->getType()->getPointeeOrArrayElementType(); 15275 BaseExpr = BaseExpr->IgnoreParenCasts(); 15276 const ConstantArrayType *ArrayTy = 15277 Context.getAsConstantArrayType(BaseExpr->getType()); 15278 15279 const Type *BaseType = 15280 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 15281 bool IsUnboundedArray = (BaseType == nullptr); 15282 if (EffectiveType->isDependentType() || 15283 (!IsUnboundedArray && BaseType->isDependentType())) 15284 return; 15285 15286 Expr::EvalResult Result; 15287 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15288 return; 15289 15290 llvm::APSInt index = Result.Val.getInt(); 15291 if (IndexNegated) { 15292 index.setIsUnsigned(false); 15293 index = -index; 15294 } 15295 15296 const NamedDecl *ND = nullptr; 15297 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15298 ND = DRE->getDecl(); 15299 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15300 ND = ME->getMemberDecl(); 15301 15302 if (IsUnboundedArray) { 15303 if (index.isUnsigned() || !index.isNegative()) { 15304 const auto &ASTC = getASTContext(); 15305 unsigned AddrBits = 15306 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15307 EffectiveType->getCanonicalTypeInternal())); 15308 if (index.getBitWidth() < AddrBits) 15309 index = index.zext(AddrBits); 15310 Optional<CharUnits> ElemCharUnits = 15311 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15312 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15313 // pointer) bounds-checking isn't meaningful. 15314 if (!ElemCharUnits) 15315 return; 15316 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15317 // If index has more active bits than address space, we already know 15318 // we have a bounds violation to warn about. Otherwise, compute 15319 // address of (index + 1)th element, and warn about bounds violation 15320 // only if that address exceeds address space. 15321 if (index.getActiveBits() <= AddrBits) { 15322 bool Overflow; 15323 llvm::APInt Product(index); 15324 Product += 1; 15325 Product = Product.umul_ov(ElemBytes, Overflow); 15326 if (!Overflow && Product.getActiveBits() <= AddrBits) 15327 return; 15328 } 15329 15330 // Need to compute max possible elements in address space, since that 15331 // is included in diag message. 15332 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15333 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15334 MaxElems += 1; 15335 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15336 MaxElems = MaxElems.udiv(ElemBytes); 15337 15338 unsigned DiagID = 15339 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15340 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15341 15342 // Diag message shows element size in bits and in "bytes" (platform- 15343 // dependent CharUnits) 15344 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15345 PDiag(DiagID) 15346 << toString(index, 10, true) << AddrBits 15347 << (unsigned)ASTC.toBits(*ElemCharUnits) 15348 << toString(ElemBytes, 10, false) 15349 << toString(MaxElems, 10, false) 15350 << (unsigned)MaxElems.getLimitedValue(~0U) 15351 << IndexExpr->getSourceRange()); 15352 15353 if (!ND) { 15354 // Try harder to find a NamedDecl to point at in the note. 
15355 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15356 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15357 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15358 ND = DRE->getDecl(); 15359 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15360 ND = ME->getMemberDecl(); 15361 } 15362 15363 if (ND) 15364 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15365 PDiag(diag::note_array_declared_here) << ND); 15366 } 15367 return; 15368 } 15369 15370 if (index.isUnsigned() || !index.isNegative()) { 15371 // It is possible that the type of the base expression after 15372 // IgnoreParenCasts is incomplete, even though the type of the base 15373 // expression before IgnoreParenCasts is complete (see PR39746 for an 15374 // example). In this case we have no information about whether the array 15375 // access exceeds the array bounds. However we can still diagnose an array 15376 // access which precedes the array bounds. 15377 if (BaseType->isIncompleteType()) 15378 return; 15379 15380 llvm::APInt size = ArrayTy->getSize(); 15381 if (!size.isStrictlyPositive()) 15382 return; 15383 15384 if (BaseType != EffectiveType) { 15385 // Make sure we're comparing apples to apples when comparing index to size 15386 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15387 uint64_t array_typesize = Context.getTypeSize(BaseType); 15388 // Handle ptrarith_typesize being zero, such as when casting to void* 15389 if (!ptrarith_typesize) ptrarith_typesize = 1; 15390 if (ptrarith_typesize != array_typesize) { 15391 // There's a cast to a different size type involved 15392 uint64_t ratio = array_typesize / ptrarith_typesize; 15393 // TODO: Be smarter about handling cases where array_typesize is not a 15394 // multiple of ptrarith_typesize 15395 if (ptrarith_typesize * ratio == array_typesize) 15396 size *= llvm::APInt(size.getBitWidth(), ratio); 15397 } 15398 } 15399 15400 if (size.getBitWidth() > index.getBitWidth()) 15401 index = index.zext(size.getBitWidth()); 15402 else if (size.getBitWidth() < index.getBitWidth()) 15403 size = size.zext(index.getBitWidth()); 15404 15405 // For array subscripting the index must be less than size, but for pointer 15406 // arithmetic also allow the index (offset) to be equal to size since 15407 // computing the next address after the end of the array is legal and 15408 // commonly done e.g. in C++ iterators and range-based for loops. 15409 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15410 return; 15411 15412 // Also don't warn for arrays of size 1 which are members of some 15413 // structure. These are often used to approximate flexible arrays in C89 15414 // code. 15415 if (IsTailPaddedMemberArray(*this, size, ND)) 15416 return; 15417 15418 // Suppress the warning if the subscript expression (as identified by the 15419 // ']' location) and the index expression are both from macro expansions 15420 // within a system header. 15421 if (ASE) { 15422 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15423 ASE->getRBracketLoc()); 15424 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15425 SourceLocation IndexLoc = 15426 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15427 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15428 return; 15429 } 15430 } 15431 15432 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15433 : diag::warn_ptr_arith_exceeds_bounds; 15434 15435 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15436 PDiag(DiagID) << toString(index, 10, true) 15437 << toString(size, 10, true) 15438 << (unsigned)size.getLimitedValue(~0U) 15439 << IndexExpr->getSourceRange()); 15440 } else { 15441 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15442 if (!ASE) { 15443 DiagID = diag::warn_ptr_arith_precedes_bounds; 15444 if (index.isNegative()) index = -index; 15445 } 15446 15447 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15448 PDiag(DiagID) << toString(index, 10, true) 15449 << IndexExpr->getSourceRange()); 15450 } 15451 15452 if (!ND) { 15453 // Try harder to find a NamedDecl to point at in the note. 15454 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15455 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15456 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15457 ND = DRE->getDecl(); 15458 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15459 ND = ME->getMemberDecl(); 15460 } 15461 15462 if (ND) 15463 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15464 PDiag(diag::note_array_declared_here) << ND); 15465 } 15466 15467 void Sema::CheckArrayAccess(const Expr *expr) { 15468 int AllowOnePastEnd = 0; 15469 while (expr) { 15470 expr = expr->IgnoreParenImpCasts(); 15471 switch (expr->getStmtClass()) { 15472 case Stmt::ArraySubscriptExprClass: { 15473 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15474 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15475 AllowOnePastEnd > 0); 15476 expr = ASE->getBase(); 15477 break; 15478 } 15479 case Stmt::MemberExprClass: { 15480 expr = cast<MemberExpr>(expr)->getBase(); 15481 break; 15482 } 15483 case Stmt::OMPArraySectionExprClass: { 15484 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15485 if (ASE->getLowerBound()) 15486 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15487 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15488 return; 15489 } 15490 case Stmt::UnaryOperatorClass: { 15491 // Only unwrap the * and & unary operators 15492 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15493 expr = UO->getSubExpr(); 15494 switch (UO->getOpcode()) { 15495 case UO_AddrOf: 15496 AllowOnePastEnd++; 15497 break; 15498 case UO_Deref: 15499 AllowOnePastEnd--; 15500 break; 15501 default: 15502 return; 15503 } 15504 break; 15505 } 15506 case Stmt::ConditionalOperatorClass: { 15507 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15508 if (const Expr *lhs = cond->getLHS()) 15509 CheckArrayAccess(lhs); 15510 if (const Expr *rhs = cond->getRHS()) 15511 CheckArrayAccess(rhs); 15512 return; 15513 } 15514 case Stmt::CXXOperatorCallExprClass: { 15515 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15516 for (const auto *Arg : OCE->arguments()) 15517 CheckArrayAccess(Arg); 15518 return; 15519 } 15520 default: 15521 return; 15522 } 15523 } 15524 } 15525 15526 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15527 15528 namespace { 15529 15530 struct RetainCycleOwner { 15531 VarDecl *Variable = nullptr; 15532 SourceRange Range; 15533 SourceLocation Loc; 15534 bool Indirect = false; 15535 15536 RetainCycleOwner() = default; 15537 15538 void setLocsFrom(Expr *e) { 15539 Loc = e->getExprLoc(); 15540 Range = e->getSourceRange(); 15541 } 15542 }; 15543 15544 } // namespace 15545 15546 /// Consider whether capturing the given variable can possibly lead to 15547 /// a retain cycle. 
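// Illustrative note (not part of the original source): the canonical pattern
// these checks look for is a strongly-held block that strongly captures its
// owner, e.g. under ARC:
//
//   self.completionHandler = ^{ [self doSomething]; };
//
// Here the property strongly retains the block and the block strongly
// captures 'self', which is diagnosed as a likely retain cycle; capturing a
// __weak reference to 'self' instead breaks the cycle. The property and
// method names are placeholders for this sketch.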
15548 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 15549 // In ARC, it's captured strongly iff the variable has __strong 15550 // lifetime. In MRR, it's captured strongly if the variable is 15551 // __block and has an appropriate type. 15552 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15553 return false; 15554 15555 owner.Variable = var; 15556 if (ref) 15557 owner.setLocsFrom(ref); 15558 return true; 15559 } 15560 15561 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 15562 while (true) { 15563 e = e->IgnoreParens(); 15564 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 15565 switch (cast->getCastKind()) { 15566 case CK_BitCast: 15567 case CK_LValueBitCast: 15568 case CK_LValueToRValue: 15569 case CK_ARCReclaimReturnedObject: 15570 e = cast->getSubExpr(); 15571 continue; 15572 15573 default: 15574 return false; 15575 } 15576 } 15577 15578 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 15579 ObjCIvarDecl *ivar = ref->getDecl(); 15580 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15581 return false; 15582 15583 // Try to find a retain cycle in the base. 15584 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 15585 return false; 15586 15587 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 15588 owner.Indirect = true; 15589 return true; 15590 } 15591 15592 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 15593 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 15594 if (!var) return false; 15595 return considerVariable(var, ref, owner); 15596 } 15597 15598 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 15599 if (member->isArrow()) return false; 15600 15601 // Don't count this as an indirect ownership. 15602 e = member->getBase(); 15603 continue; 15604 } 15605 15606 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 15607 // Only pay attention to pseudo-objects on property references. 15608 ObjCPropertyRefExpr *pre 15609 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 15610 ->IgnoreParens()); 15611 if (!pre) return false; 15612 if (pre->isImplicitProperty()) return false; 15613 ObjCPropertyDecl *property = pre->getExplicitProperty(); 15614 if (!property->isRetaining() && 15615 !(property->getPropertyIvarDecl() && 15616 property->getPropertyIvarDecl()->getType() 15617 .getObjCLifetime() == Qualifiers::OCL_Strong)) 15618 return false; 15619 15620 owner.Indirect = true; 15621 if (pre->isSuperReceiver()) { 15622 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 15623 if (!owner.Variable) 15624 return false; 15625 owner.Loc = pre->getLocation(); 15626 owner.Range = pre->getSourceRange(); 15627 return true; 15628 } 15629 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 15630 ->getSourceExpr()); 15631 continue; 15632 } 15633 15634 // Array ivars? 
15635 15636 return false; 15637 } 15638 } 15639 15640 namespace { 15641 15642 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 15643 ASTContext &Context; 15644 VarDecl *Variable; 15645 Expr *Capturer = nullptr; 15646 bool VarWillBeReased = false; 15647 15648 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 15649 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 15650 Context(Context), Variable(variable) {} 15651 15652 void VisitDeclRefExpr(DeclRefExpr *ref) { 15653 if (ref->getDecl() == Variable && !Capturer) 15654 Capturer = ref; 15655 } 15656 15657 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 15658 if (Capturer) return; 15659 Visit(ref->getBase()); 15660 if (Capturer && ref->isFreeIvar()) 15661 Capturer = ref; 15662 } 15663 15664 void VisitBlockExpr(BlockExpr *block) { 15665 // Look inside nested blocks 15666 if (block->getBlockDecl()->capturesVariable(Variable)) 15667 Visit(block->getBlockDecl()->getBody()); 15668 } 15669 15670 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 15671 if (Capturer) return; 15672 if (OVE->getSourceExpr()) 15673 Visit(OVE->getSourceExpr()); 15674 } 15675 15676 void VisitBinaryOperator(BinaryOperator *BinOp) { 15677 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 15678 return; 15679 Expr *LHS = BinOp->getLHS(); 15680 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 15681 if (DRE->getDecl() != Variable) 15682 return; 15683 if (Expr *RHS = BinOp->getRHS()) { 15684 RHS = RHS->IgnoreParenCasts(); 15685 Optional<llvm::APSInt> Value; 15686 VarWillBeReased = 15687 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 15688 *Value == 0); 15689 } 15690 } 15691 } 15692 }; 15693 15694 } // namespace 15695 15696 /// Check whether the given argument is a block which captures a 15697 /// variable. 15698 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 15699 assert(owner.Variable && owner.Loc.isValid()); 15700 15701 e = e->IgnoreParenCasts(); 15702 15703 // Look through [^{...} copy] and Block_copy(^{...}). 15704 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 15705 Selector Cmd = ME->getSelector(); 15706 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 15707 e = ME->getInstanceReceiver(); 15708 if (!e) 15709 return nullptr; 15710 e = e->IgnoreParenCasts(); 15711 } 15712 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 15713 if (CE->getNumArgs() == 1) { 15714 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 15715 if (Fn) { 15716 const IdentifierInfo *FnI = Fn->getIdentifier(); 15717 if (FnI && FnI->isStr("_Block_copy")) { 15718 e = CE->getArg(0)->IgnoreParenCasts(); 15719 } 15720 } 15721 } 15722 } 15723 15724 BlockExpr *block = dyn_cast<BlockExpr>(e); 15725 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 15726 return nullptr; 15727 15728 FindCaptureVisitor visitor(S.Context, owner.Variable); 15729 visitor.Visit(block->getBlockDecl()->getBody()); 15730 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 15731 } 15732 15733 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 15734 RetainCycleOwner &owner) { 15735 assert(capturer); 15736 assert(owner.Variable && owner.Loc.isValid()); 15737 15738 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 15739 << owner.Variable << capturer->getSourceRange(); 15740 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 15741 << owner.Indirect << owner.Range; 15742 } 15743 15744 /// Check for a keyword selector that starts with the word 'add' or 15745 /// 'set'. 15746 static bool isSetterLikeSelector(Selector sel) { 15747 if (sel.isUnarySelector()) return false; 15748 15749 StringRef str = sel.getNameForSlot(0); 15750 while (!str.empty() && str.front() == '_') str = str.substr(1); 15751 if (str.startswith("set")) 15752 str = str.substr(3); 15753 else if (str.startswith("add")) { 15754 // Specially allow 'addOperationWithBlock:'. 15755 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 15756 return false; 15757 str = str.substr(3); 15758 } 15759 else 15760 return false; 15761 15762 if (str.empty()) return true; 15763 return !isLowercase(str.front()); 15764 } 15765 15766 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 15767 ObjCMessageExpr *Message) { 15768 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 15769 Message->getReceiverInterface(), 15770 NSAPI::ClassId_NSMutableArray); 15771 if (!IsMutableArray) { 15772 return None; 15773 } 15774 15775 Selector Sel = Message->getSelector(); 15776 15777 Optional<NSAPI::NSArrayMethodKind> MKOpt = 15778 S.NSAPIObj->getNSArrayMethodKind(Sel); 15779 if (!MKOpt) { 15780 return None; 15781 } 15782 15783 NSAPI::NSArrayMethodKind MK = *MKOpt; 15784 15785 switch (MK) { 15786 case NSAPI::NSMutableArr_addObject: 15787 case NSAPI::NSMutableArr_insertObjectAtIndex: 15788 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 15789 return 0; 15790 case NSAPI::NSMutableArr_replaceObjectAtIndex: 15791 return 1; 15792 15793 default: 15794 return None; 15795 } 15796 15797 return None; 15798 } 15799 15800 static 15801 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 15802 ObjCMessageExpr *Message) { 15803 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 15804 Message->getReceiverInterface(), 15805 NSAPI::ClassId_NSMutableDictionary); 15806 if (!IsMutableDictionary) { 15807 return None; 15808 } 15809 15810 Selector Sel = Message->getSelector(); 15811 15812 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15813 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15814 if (!MKOpt) { 15815 return None; 15816 } 15817 15818 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15819 15820 switch (MK) { 15821 case NSAPI::NSMutableDict_setObjectForKey: 15822 case NSAPI::NSMutableDict_setValueForKey: 15823 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15824 return 0; 15825 15826 default: 15827 return None; 15828 } 15829 15830 return None; 15831 } 15832 15833 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15834 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15835 Message->getReceiverInterface(), 15836 NSAPI::ClassId_NSMutableSet); 15837 15838 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15839 Message->getReceiverInterface(), 15840 NSAPI::ClassId_NSMutableOrderedSet); 15841 if (!IsMutableSet && !IsMutableOrderedSet) { 15842 return None; 15843 } 15844 15845 Selector Sel = Message->getSelector(); 15846 15847 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15848 if (!MKOpt) 
{ 15849 return None; 15850 } 15851 15852 NSAPI::NSSetMethodKind MK = *MKOpt; 15853 15854 switch (MK) { 15855 case NSAPI::NSMutableSet_addObject: 15856 case NSAPI::NSOrderedSet_setObjectAtIndex: 15857 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15858 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15859 return 0; 15860 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15861 return 1; 15862 } 15863 15864 return None; 15865 } 15866 15867 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15868 if (!Message->isInstanceMessage()) { 15869 return; 15870 } 15871 15872 Optional<int> ArgOpt; 15873 15874 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15875 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15876 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15877 return; 15878 } 15879 15880 int ArgIndex = *ArgOpt; 15881 15882 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15883 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15884 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15885 } 15886 15887 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15888 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15889 if (ArgRE->isObjCSelfExpr()) { 15890 Diag(Message->getSourceRange().getBegin(), 15891 diag::warn_objc_circular_container) 15892 << ArgRE->getDecl() << StringRef("'super'"); 15893 } 15894 } 15895 } else { 15896 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15897 15898 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15899 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15900 } 15901 15902 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15903 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15904 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15905 ValueDecl *Decl = ReceiverRE->getDecl(); 15906 Diag(Message->getSourceRange().getBegin(), 15907 diag::warn_objc_circular_container) 15908 << Decl << Decl; 15909 if (!ArgRE->isObjCSelfExpr()) { 15910 Diag(Decl->getLocation(), 15911 diag::note_objc_circular_container_declared_here) 15912 << Decl; 15913 } 15914 } 15915 } 15916 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15917 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15918 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15919 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15920 Diag(Message->getSourceRange().getBegin(), 15921 diag::warn_objc_circular_container) 15922 << Decl << Decl; 15923 Diag(Decl->getLocation(), 15924 diag::note_objc_circular_container_declared_here) 15925 << Decl; 15926 } 15927 } 15928 } 15929 } 15930 } 15931 15932 /// Check a message send to see if it's likely to cause a retain cycle. 15933 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15934 // Only check instance methods whose selector looks like a setter. 15935 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15936 return; 15937 15938 // Try to find a variable that the receiver is strongly owned by. 
15939 RetainCycleOwner owner; 15940 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15941 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15942 return; 15943 } else { 15944 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15945 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15946 owner.Loc = msg->getSuperLoc(); 15947 owner.Range = msg->getSuperLoc(); 15948 } 15949 15950 // Check whether the receiver is captured by any of the arguments. 15951 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15952 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15953 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15954 // noescape blocks should not be retained by the method. 15955 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15956 continue; 15957 return diagnoseRetainCycle(*this, capturer, owner); 15958 } 15959 } 15960 } 15961 15962 /// Check a property assign to see if it's likely to cause a retain cycle. 15963 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15964 RetainCycleOwner owner; 15965 if (!findRetainCycleOwner(*this, receiver, owner)) 15966 return; 15967 15968 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15969 diagnoseRetainCycle(*this, capturer, owner); 15970 } 15971 15972 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 15973 RetainCycleOwner Owner; 15974 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 15975 return; 15976 15977 // Because we don't have an expression for the variable, we have to set the 15978 // location explicitly here. 15979 Owner.Loc = Var->getLocation(); 15980 Owner.Range = Var->getSourceRange(); 15981 15982 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 15983 diagnoseRetainCycle(*this, Capturer, Owner); 15984 } 15985 15986 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 15987 Expr *RHS, bool isProperty) { 15988 // Check if RHS is an Objective-C object literal, which also can get 15989 // immediately zapped in a weak reference. Note that we explicitly 15990 // allow ObjCStringLiterals, since those are designed to never really die. 15991 RHS = RHS->IgnoreParenImpCasts(); 15992 15993 // This enum needs to match with the 'select' in 15994 // warn_objc_arc_literal_assign (off-by-1). 15995 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 15996 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 15997 return false; 15998 15999 S.Diag(Loc, diag::warn_arc_literal_assign) 16000 << (unsigned) Kind 16001 << (isProperty ? 0 : 1) 16002 << RHS->getSourceRange(); 16003 16004 return true; 16005 } 16006 16007 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16008 Qualifiers::ObjCLifetime LT, 16009 Expr *RHS, bool isProperty) { 16010 // Strip off any implicit cast added to get to the one ARC-specific. 16011 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16012 if (cast->getCastKind() == CK_ARCConsumeObject) { 16013 S.Diag(Loc, diag::warn_arc_retained_assign) 16014 << (LT == Qualifiers::OCL_ExplicitNone) 16015 << (isProperty ? 
0 : 1) 16016 << RHS->getSourceRange(); 16017 return true; 16018 } 16019 RHS = cast->getSubExpr(); 16020 } 16021 16022 if (LT == Qualifiers::OCL_Weak && 16023 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16024 return true; 16025 16026 return false; 16027 } 16028 16029 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16030 QualType LHS, Expr *RHS) { 16031 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16032 16033 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16034 return false; 16035 16036 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16037 return true; 16038 16039 return false; 16040 } 16041 16042 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16043 Expr *LHS, Expr *RHS) { 16044 QualType LHSType; 16045 // PropertyRef on LHS type need be directly obtained from 16046 // its declaration as it has a PseudoType. 16047 ObjCPropertyRefExpr *PRE 16048 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16049 if (PRE && !PRE->isImplicitProperty()) { 16050 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16051 if (PD) 16052 LHSType = PD->getType(); 16053 } 16054 16055 if (LHSType.isNull()) 16056 LHSType = LHS->getType(); 16057 16058 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16059 16060 if (LT == Qualifiers::OCL_Weak) { 16061 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16062 getCurFunction()->markSafeWeakUse(LHS); 16063 } 16064 16065 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16066 return; 16067 16068 // FIXME. Check for other life times. 16069 if (LT != Qualifiers::OCL_None) 16070 return; 16071 16072 if (PRE) { 16073 if (PRE->isImplicitProperty()) 16074 return; 16075 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16076 if (!PD) 16077 return; 16078 16079 unsigned Attributes = PD->getPropertyAttributes(); 16080 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16081 // when 'assign' attribute was not explicitly specified 16082 // by user, ignore it and rely on property type itself 16083 // for lifetime info. 16084 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16085 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16086 LHSType->isObjCRetainableType()) 16087 return; 16088 16089 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16090 if (cast->getCastKind() == CK_ARCConsumeObject) { 16091 Diag(Loc, diag::warn_arc_retained_property_assign) 16092 << RHS->getSourceRange(); 16093 return; 16094 } 16095 RHS = cast->getSubExpr(); 16096 } 16097 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16098 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16099 return; 16100 } 16101 } 16102 } 16103 16104 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16105 16106 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16107 SourceLocation StmtLoc, 16108 const NullStmt *Body) { 16109 // Do not warn if the body is a macro that expands to nothing, e.g: 16110 // 16111 // #define CALL(x) 16112 // if (condition) 16113 // CALL(0); 16114 if (Body->hasLeadingEmptyMacro()) 16115 return false; 16116 16117 // Get line numbers of statement and body. 
16118 bool StmtLineInvalid; 16119 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16120 &StmtLineInvalid); 16121 if (StmtLineInvalid) 16122 return false; 16123 16124 bool BodyLineInvalid; 16125 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16126 &BodyLineInvalid); 16127 if (BodyLineInvalid) 16128 return false; 16129 16130 // Warn if null statement and body are on the same line. 16131 if (StmtLine != BodyLine) 16132 return false; 16133 16134 return true; 16135 } 16136 16137 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16138 const Stmt *Body, 16139 unsigned DiagID) { 16140 // Since this is a syntactic check, don't emit diagnostic for template 16141 // instantiations, this just adds noise. 16142 if (CurrentInstantiationScope) 16143 return; 16144 16145 // The body should be a null statement. 16146 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16147 if (!NBody) 16148 return; 16149 16150 // Do the usual checks. 16151 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16152 return; 16153 16154 Diag(NBody->getSemiLoc(), DiagID); 16155 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16156 } 16157 16158 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16159 const Stmt *PossibleBody) { 16160 assert(!CurrentInstantiationScope); // Ensured by caller 16161 16162 SourceLocation StmtLoc; 16163 const Stmt *Body; 16164 unsigned DiagID; 16165 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16166 StmtLoc = FS->getRParenLoc(); 16167 Body = FS->getBody(); 16168 DiagID = diag::warn_empty_for_body; 16169 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16170 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 16171 Body = WS->getBody(); 16172 DiagID = diag::warn_empty_while_body; 16173 } else 16174 return; // Neither `for' nor `while'. 16175 16176 // The body should be a null statement. 16177 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16178 if (!NBody) 16179 return; 16180 16181 // Skip expensive checks if diagnostic is disabled. 16182 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16183 return; 16184 16185 // Do the usual checks. 16186 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16187 return; 16188 16189 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16190 // noise level low, emit diagnostics only if for/while is followed by a 16191 // CompoundStmt, e.g.: 16192 // for (int i = 0; i < n; i++); 16193 // { 16194 // a(i); 16195 // } 16196 // or if for/while is followed by a statement with more indentation 16197 // than for/while itself: 16198 // for (int i = 0; i < n; i++); 16199 // a(i); 16200 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16201 if (!ProbableTypo) { 16202 bool BodyColInvalid; 16203 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16204 PossibleBody->getBeginLoc(), &BodyColInvalid); 16205 if (BodyColInvalid) 16206 return; 16207 16208 bool StmtColInvalid; 16209 unsigned StmtCol = 16210 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16211 if (StmtColInvalid) 16212 return; 16213 16214 if (BodyCol > StmtCol) 16215 ProbableTypo = true; 16216 } 16217 16218 if (ProbableTypo) { 16219 Diag(NBody->getSemiLoc(), DiagID); 16220 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16221 } 16222 } 16223 16224 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16225 16226 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
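// Illustrative note (not part of the original source): the typical patterns
// diagnosed here are
//
//   std::vector<int> v = makeVector();
//   v = std::move(v);                  // diagnosed: 'v' is moved to itself
//   obj.field = std::move(obj.field);  // member-wise self-moves are handled
//                                      // below as well
//
// where 'makeVector', 'obj', and 'field' are placeholders for this sketch.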
void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                            SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
    return;

  if (inTemplateInstantiation())
    return;

  // Strip parens and casts away.
  LHSExpr = LHSExpr->IgnoreParenImpCasts();
  RHSExpr = RHSExpr->IgnoreParenImpCasts();

  // Check for a call expression.
  const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr);
  if (!CE || CE->getNumArgs() != 1)
    return;

  // Check for a call to std::move.
  if (!CE->isCallToStdMove())
    return;

  // Get the argument from std::move.
  RHSExpr = CE->getArg(0);

  const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
  const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);

  // Two DeclRefExprs: check that the decls are the same.
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  // Member variables require a different approach to check for self moves.
  // MemberExprs are the same if every nested MemberExpr refers to the same
  // Decl and the base Exprs are either DeclRefExprs with the same Decl or
  // CXXThisExprs.
  const Expr *LHSBase = LHSExpr;
  const Expr *RHSBase = RHSExpr;
  const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr);
  const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr);
  if (!LHSME || !RHSME)
    return;

  while (LHSME && RHSME) {
    if (LHSME->getMemberDecl()->getCanonicalDecl() !=
        RHSME->getMemberDecl()->getCanonicalDecl())
      return;

    LHSBase = LHSME->getBase();
    RHSBase = RHSME->getBase();
    LHSME = dyn_cast<MemberExpr>(LHSBase);
    RHSME = dyn_cast<MemberExpr>(RHSBase);
  }

  LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase);
  RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase);
  if (LHSDeclRef && RHSDeclRef) {
    if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
      return;
    if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
        RHSDeclRef->getDecl()->getCanonicalDecl())
      return;

    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
    return;
  }

  if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
    Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
                                      << LHSExpr->getSourceRange()
                                      << RHSExpr->getSourceRange();
}

//===--- Layout compatibility ----------------------------------------------//

static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);

/// Check if two enumeration types are layout-compatible.
static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
  // C++11 [dcl.enum] p8:
  //   Two enumeration types are layout-compatible if they have the same
  //   underlying type.
  return ED1->isComplete() && ED2->isComplete() &&
         C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType());
}

/// Check if two fields are layout-compatible.
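// Illustrative note (not part of the original source): under the enumeration
// rule implemented above,
//
//   enum E1 : int { A };
//   enum E2 : int { B };
//
// E1 and E2 are layout-compatible because they share the underlying type
// 'int'. The field, struct, and union cases are handled by the helpers that
// follow.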
16325 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16326 FieldDecl *Field2) { 16327 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16328 return false; 16329 16330 if (Field1->isBitField() != Field2->isBitField()) 16331 return false; 16332 16333 if (Field1->isBitField()) { 16334 // Make sure that the bit-fields are the same length. 16335 unsigned Bits1 = Field1->getBitWidthValue(C); 16336 unsigned Bits2 = Field2->getBitWidthValue(C); 16337 16338 if (Bits1 != Bits2) 16339 return false; 16340 } 16341 16342 return true; 16343 } 16344 16345 /// Check if two standard-layout structs are layout-compatible. 16346 /// (C++11 [class.mem] p17) 16347 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16348 RecordDecl *RD2) { 16349 // If both records are C++ classes, check that base classes match. 16350 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16351 // If one of records is a CXXRecordDecl we are in C++ mode, 16352 // thus the other one is a CXXRecordDecl, too. 16353 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16354 // Check number of base classes. 16355 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16356 return false; 16357 16358 // Check the base classes. 16359 for (CXXRecordDecl::base_class_const_iterator 16360 Base1 = D1CXX->bases_begin(), 16361 BaseEnd1 = D1CXX->bases_end(), 16362 Base2 = D2CXX->bases_begin(); 16363 Base1 != BaseEnd1; 16364 ++Base1, ++Base2) { 16365 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16366 return false; 16367 } 16368 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16369 // If only RD2 is a C++ class, it should have zero base classes. 16370 if (D2CXX->getNumBases() > 0) 16371 return false; 16372 } 16373 16374 // Check the fields. 16375 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16376 Field2End = RD2->field_end(), 16377 Field1 = RD1->field_begin(), 16378 Field1End = RD1->field_end(); 16379 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16380 if (!isLayoutCompatible(C, *Field1, *Field2)) 16381 return false; 16382 } 16383 if (Field1 != Field1End || Field2 != Field2End) 16384 return false; 16385 16386 return true; 16387 } 16388 16389 /// Check if two standard-layout unions are layout-compatible. 16390 /// (C++11 [class.mem] p18) 16391 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16392 RecordDecl *RD2) { 16393 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16394 for (auto *Field2 : RD2->fields()) 16395 UnmatchedFields.insert(Field2); 16396 16397 for (auto *Field1 : RD1->fields()) { 16398 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16399 I = UnmatchedFields.begin(), 16400 E = UnmatchedFields.end(); 16401 16402 for ( ; I != E; ++I) { 16403 if (isLayoutCompatible(C, Field1, *I)) { 16404 bool Result = UnmatchedFields.erase(*I); 16405 (void) Result; 16406 assert(Result); 16407 break; 16408 } 16409 } 16410 if (I == E) 16411 return false; 16412 } 16413 16414 return UnmatchedFields.empty(); 16415 } 16416 16417 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16418 RecordDecl *RD2) { 16419 if (RD1->isUnion() != RD2->isUnion()) 16420 return false; 16421 16422 if (RD1->isUnion()) 16423 return isLayoutCompatibleUnion(C, RD1, RD2); 16424 else 16425 return isLayoutCompatibleStruct(C, RD1, RD2); 16426 } 16427 16428 /// Check if two types are layout-compatible in C++11 sense. 
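// Illustrative note (not part of the original source): under these rules the
// standard-layout structs
//
//   struct A { int i; char c; };
//   struct B { int x; char y; };
//
// are layout-compatible (same number of non-static data members, and
// corresponding members have layout-compatible types), while giving either
// one a virtual function or a differing member type breaks compatibility.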
16429 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16430 if (T1.isNull() || T2.isNull()) 16431 return false; 16432 16433 // C++11 [basic.types] p11: 16434 // If two types T1 and T2 are the same type, then T1 and T2 are 16435 // layout-compatible types. 16436 if (C.hasSameType(T1, T2)) 16437 return true; 16438 16439 T1 = T1.getCanonicalType().getUnqualifiedType(); 16440 T2 = T2.getCanonicalType().getUnqualifiedType(); 16441 16442 const Type::TypeClass TC1 = T1->getTypeClass(); 16443 const Type::TypeClass TC2 = T2->getTypeClass(); 16444 16445 if (TC1 != TC2) 16446 return false; 16447 16448 if (TC1 == Type::Enum) { 16449 return isLayoutCompatible(C, 16450 cast<EnumType>(T1)->getDecl(), 16451 cast<EnumType>(T2)->getDecl()); 16452 } else if (TC1 == Type::Record) { 16453 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16454 return false; 16455 16456 return isLayoutCompatible(C, 16457 cast<RecordType>(T1)->getDecl(), 16458 cast<RecordType>(T2)->getDecl()); 16459 } 16460 16461 return false; 16462 } 16463 16464 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16465 16466 /// Given a type tag expression find the type tag itself. 16467 /// 16468 /// \param TypeExpr Type tag expression, as it appears in user's code. 16469 /// 16470 /// \param VD Declaration of an identifier that appears in a type tag. 16471 /// 16472 /// \param MagicValue Type tag magic value. 16473 /// 16474 /// \param isConstantEvaluated whether the evalaution should be performed in 16475 16476 /// constant context. 16477 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16478 const ValueDecl **VD, uint64_t *MagicValue, 16479 bool isConstantEvaluated) { 16480 while(true) { 16481 if (!TypeExpr) 16482 return false; 16483 16484 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16485 16486 switch (TypeExpr->getStmtClass()) { 16487 case Stmt::UnaryOperatorClass: { 16488 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16489 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16490 TypeExpr = UO->getSubExpr(); 16491 continue; 16492 } 16493 return false; 16494 } 16495 16496 case Stmt::DeclRefExprClass: { 16497 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16498 *VD = DRE->getDecl(); 16499 return true; 16500 } 16501 16502 case Stmt::IntegerLiteralClass: { 16503 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16504 llvm::APInt MagicValueAPInt = IL->getValue(); 16505 if (MagicValueAPInt.getActiveBits() <= 64) { 16506 *MagicValue = MagicValueAPInt.getZExtValue(); 16507 return true; 16508 } else 16509 return false; 16510 } 16511 16512 case Stmt::BinaryConditionalOperatorClass: 16513 case Stmt::ConditionalOperatorClass: { 16514 const AbstractConditionalOperator *ACO = 16515 cast<AbstractConditionalOperator>(TypeExpr); 16516 bool Result; 16517 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16518 isConstantEvaluated)) { 16519 if (Result) 16520 TypeExpr = ACO->getTrueExpr(); 16521 else 16522 TypeExpr = ACO->getFalseExpr(); 16523 continue; 16524 } 16525 return false; 16526 } 16527 16528 case Stmt::BinaryOperatorClass: { 16529 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16530 if (BO->getOpcode() == BO_Comma) { 16531 TypeExpr = BO->getRHS(); 16532 continue; 16533 } 16534 return false; 16535 } 16536 16537 default: 16538 return false; 16539 } 16540 } 16541 } 16542 16543 /// Retrieve the C type corresponding to type tag TypeExpr. 
16544 /// 16545 /// \param TypeExpr Expression that specifies a type tag. 16546 /// 16547 /// \param MagicValues Registered magic values. 16548 /// 16549 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 16550 /// kind. 16551 /// 16552 /// \param TypeInfo Information about the corresponding C type. 16553 /// 16554 /// \param isConstantEvaluated whether the evalaution should be performed in 16555 /// constant context. 16556 /// 16557 /// \returns true if the corresponding C type was found. 16558 static bool GetMatchingCType( 16559 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 16560 const ASTContext &Ctx, 16561 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 16562 *MagicValues, 16563 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 16564 bool isConstantEvaluated) { 16565 FoundWrongKind = false; 16566 16567 // Variable declaration that has type_tag_for_datatype attribute. 16568 const ValueDecl *VD = nullptr; 16569 16570 uint64_t MagicValue; 16571 16572 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 16573 return false; 16574 16575 if (VD) { 16576 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 16577 if (I->getArgumentKind() != ArgumentKind) { 16578 FoundWrongKind = true; 16579 return false; 16580 } 16581 TypeInfo.Type = I->getMatchingCType(); 16582 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 16583 TypeInfo.MustBeNull = I->getMustBeNull(); 16584 return true; 16585 } 16586 return false; 16587 } 16588 16589 if (!MagicValues) 16590 return false; 16591 16592 llvm::DenseMap<Sema::TypeTagMagicValue, 16593 Sema::TypeTagData>::const_iterator I = 16594 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 16595 if (I == MagicValues->end()) 16596 return false; 16597 16598 TypeInfo = I->second; 16599 return true; 16600 } 16601 16602 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 16603 uint64_t MagicValue, QualType Type, 16604 bool LayoutCompatible, 16605 bool MustBeNull) { 16606 if (!TypeTagForDatatypeMagicValues) 16607 TypeTagForDatatypeMagicValues.reset( 16608 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 16609 16610 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 16611 (*TypeTagForDatatypeMagicValues)[Magic] = 16612 TypeTagData(Type, LayoutCompatible, MustBeNull); 16613 } 16614 16615 static bool IsSameCharType(QualType T1, QualType T2) { 16616 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 16617 if (!BT1) 16618 return false; 16619 16620 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 16621 if (!BT2) 16622 return false; 16623 16624 BuiltinType::Kind T1Kind = BT1->getKind(); 16625 BuiltinType::Kind T2Kind = BT2->getKind(); 16626 16627 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 16628 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 16629 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 16630 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 16631 } 16632 16633 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 16634 const ArrayRef<const Expr *> ExprArgs, 16635 SourceLocation CallSiteLoc) { 16636 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 16637 bool IsPointerAttr = Attr->getIsPointer(); 16638 16639 // Retrieve the argument representing the 'type_tag'. 
16640 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 16641 if (TypeTagIdxAST >= ExprArgs.size()) { 16642 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16643 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 16644 return; 16645 } 16646 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 16647 bool FoundWrongKind; 16648 TypeTagData TypeInfo; 16649 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 16650 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 16651 TypeInfo, isConstantEvaluated())) { 16652 if (FoundWrongKind) 16653 Diag(TypeTagExpr->getExprLoc(), 16654 diag::warn_type_tag_for_datatype_wrong_kind) 16655 << TypeTagExpr->getSourceRange(); 16656 return; 16657 } 16658 16659 // Retrieve the argument representing the 'arg_idx'. 16660 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 16661 if (ArgumentIdxAST >= ExprArgs.size()) { 16662 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16663 << 1 << Attr->getArgumentIdx().getSourceIndex(); 16664 return; 16665 } 16666 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 16667 if (IsPointerAttr) { 16668 // Skip implicit cast of pointer to `void *' (as a function argument). 16669 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 16670 if (ICE->getType()->isVoidPointerType() && 16671 ICE->getCastKind() == CK_BitCast) 16672 ArgumentExpr = ICE->getSubExpr(); 16673 } 16674 QualType ArgumentType = ArgumentExpr->getType(); 16675 16676 // Passing a `void*' pointer shouldn't trigger a warning. 16677 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 16678 return; 16679 16680 if (TypeInfo.MustBeNull) { 16681 // Type tag with matching void type requires a null pointer. 16682 if (!ArgumentExpr->isNullPointerConstant(Context, 16683 Expr::NPC_ValueDependentIsNotNull)) { 16684 Diag(ArgumentExpr->getExprLoc(), 16685 diag::warn_type_safety_null_pointer_required) 16686 << ArgumentKind->getName() 16687 << ArgumentExpr->getSourceRange() 16688 << TypeTagExpr->getSourceRange(); 16689 } 16690 return; 16691 } 16692 16693 QualType RequiredType = TypeInfo.Type; 16694 if (IsPointerAttr) 16695 RequiredType = Context.getPointerType(RequiredType); 16696 16697 bool mismatch = false; 16698 if (!TypeInfo.LayoutCompatible) { 16699 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 16700 16701 // C++11 [basic.fundamental] p1: 16702 // Plain char, signed char, and unsigned char are three distinct types. 16703 // 16704 // But we treat plain `char' as equivalent to `signed char' or `unsigned 16705 // char' depending on the current char signedness mode. 
16706 if (mismatch) 16707 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 16708 RequiredType->getPointeeType())) || 16709 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 16710 mismatch = false; 16711 } else 16712 if (IsPointerAttr) 16713 mismatch = !isLayoutCompatible(Context, 16714 ArgumentType->getPointeeType(), 16715 RequiredType->getPointeeType()); 16716 else 16717 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 16718 16719 if (mismatch) 16720 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 16721 << ArgumentType << ArgumentKind 16722 << TypeInfo.LayoutCompatible << RequiredType 16723 << ArgumentExpr->getSourceRange() 16724 << TypeTagExpr->getSourceRange(); 16725 } 16726 16727 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 16728 CharUnits Alignment) { 16729 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 16730 } 16731 16732 void Sema::DiagnoseMisalignedMembers() { 16733 for (MisalignedMember &m : MisalignedMembers) { 16734 const NamedDecl *ND = m.RD; 16735 if (ND->getName().empty()) { 16736 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 16737 ND = TD; 16738 } 16739 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 16740 << m.MD << ND << m.E->getSourceRange(); 16741 } 16742 MisalignedMembers.clear(); 16743 } 16744 16745 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 16746 E = E->IgnoreParens(); 16747 if (!T->isPointerType() && !T->isIntegerType()) 16748 return; 16749 if (isa<UnaryOperator>(E) && 16750 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 16751 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 16752 if (isa<MemberExpr>(Op)) { 16753 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 16754 if (MA != MisalignedMembers.end() && 16755 (T->isIntegerType() || 16756 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 16757 Context.getTypeAlignInChars( 16758 T->getPointeeType()) <= MA->Alignment)))) 16759 MisalignedMembers.erase(MA); 16760 } 16761 } 16762 } 16763 16764 void Sema::RefersToMemberWithReducedAlignment( 16765 Expr *E, 16766 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 16767 Action) { 16768 const auto *ME = dyn_cast<MemberExpr>(E); 16769 if (!ME) 16770 return; 16771 16772 // No need to check expressions with an __unaligned-qualified type. 16773 if (E->getType().getQualifiers().hasUnaligned()) 16774 return; 16775 16776 // For a chain of MemberExpr like "a.b.c.d" this list 16777 // will keep FieldDecl's like [d, c, b]. 16778 SmallVector<FieldDecl *, 4> ReverseMemberChain; 16779 const MemberExpr *TopME = nullptr; 16780 bool AnyIsPacked = false; 16781 do { 16782 QualType BaseType = ME->getBase()->getType(); 16783 if (BaseType->isDependentType()) 16784 return; 16785 if (ME->isArrow()) 16786 BaseType = BaseType->getPointeeType(); 16787 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 16788 if (RD->isInvalidDecl()) 16789 return; 16790 16791 ValueDecl *MD = ME->getMemberDecl(); 16792 auto *FD = dyn_cast<FieldDecl>(MD); 16793 // We do not care about non-data members. 
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not in the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may satisfy the alignment, but the effective alignment
      // may still be lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit for it.
    // Intuitively, walking the chain of member expressions from right to
    // left, we start with the alignment required by the field type, but some
    // packed attribute in that chain has reduced the alignment. Another
    // packed structure may increase it again, but if we get here any such
    // increase has not been enough. So pointing at the first FieldDecl that
    // is either packed itself or whose RecordDecl is packed seems reasonable.
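    //
    // A hedged example of the situation (hypothetical C code):
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { struct Inner in; } o;
    //   int *p = &o.in.i;   // 'i' only has byte alignment here.
    // The loop below would pick 'i' as the culprit, since its parent record
    // is packed, and report the reduced (1-byte) alignment.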
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

// Check if \p Ty is a valid type for the elementwise math builtins. If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    S.Diag(Loc, diag::err_builtin_invalid_arg_type)
        << 1 << /* vector, integer or float ty */ 0 << Ty;
    return true;
  }
  return false;
}

bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}

bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
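  // (For example, in a hypothetical call like __builtin_elementwise_max(I, L)
  // with 'int I' and 'long L', both operands are converted to 'long' and the
  // call's result type becomes 'long'.)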
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}

bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}

ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty */ 1 << Matrix->getType();
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}

// Get and verify the matrix dimensions.
static llvm::Optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  Optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
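  // (A hedged usage sketch, assuming -fenable-matrix: for a 'float *Ptr', a
  // call such as __builtin_matrix_column_major_load(Ptr, 3, 2, Stride) would
  // produce a 3x2 matrix of 'float', the pointee type checked below.)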
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /* pointer to element ty */ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty */ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  llvm::Optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  llvm::Optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
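  // (E.g., with 4 rows a constant stride of 3 would be rejected below; the
  // stride must be at least the number of rows of the loaded matrix.)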
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  if (MaybeRows) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /* pointer to element ty */ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
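  // (As for the load builtin, a constant stride smaller than the number of
  // rows of the stored matrix is diagnosed below.)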
  if (MatrixTy) {
    if (Optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}

/// CheckTCBEnforcement - Enforces the bounds of a TCB: every function in a
/// named TCB may only directly call other functions in the same TCB, as
/// marked by the enforce_tcb and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
                               const FunctionDecl *Callee) {
  const FunctionDecl *Caller = getCurFunctionDecl();

  // Calls to builtins are not enforced.
  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
      Callee->getBuiltinID() != 0)
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for_each(Callee->specific_attrs<EnforceTCBAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
  for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
           [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for_each(Caller->specific_attrs<EnforceTCBAttr>(), [&](const auto *A) {
    StringRef CallerTCB = A->getTCBName();
    if (CalleeTCBs.count(CallerTCB) == 0) {
      this->Diag(TheCall->getExprLoc(), diag::warn_tcb_enforcement_violation)
          << Callee << CallerTCB;
    }
  });
}
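
// A hedged sketch of what CheckTCBEnforcement diagnoses (function names are
// hypothetical; attribute spellings follow the Clang attribute reference):
//   __attribute__((enforce_tcb("net"))) void in_tcb(void);
//   void outside_tcb(void);
//   __attribute__((enforce_tcb("net"))) void caller(void) {
//     in_tcb();       // OK: same TCB.
//     outside_tcb();  // warns: the call leaves the TCB 'net'.
//   }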