1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements extra semantic analysis beyond what is enforced 10 // by the C type system. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/AST/APValue.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/Attr.h" 17 #include "clang/AST/AttrIterator.h" 18 #include "clang/AST/CharUnits.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclBase.h" 21 #include "clang/AST/DeclCXX.h" 22 #include "clang/AST/DeclObjC.h" 23 #include "clang/AST/DeclarationName.h" 24 #include "clang/AST/EvaluatedExprVisitor.h" 25 #include "clang/AST/Expr.h" 26 #include "clang/AST/ExprCXX.h" 27 #include "clang/AST/ExprObjC.h" 28 #include "clang/AST/ExprOpenMP.h" 29 #include "clang/AST/FormatString.h" 30 #include "clang/AST/NSAPI.h" 31 #include "clang/AST/NonTrivialTypeVisitor.h" 32 #include "clang/AST/OperationKinds.h" 33 #include "clang/AST/RecordLayout.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include "clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include "clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 
57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSet.h" 79 #include "llvm/ADT/StringSwitch.h" 80 #include "llvm/ADT/Triple.h" 81 #include "llvm/Support/AtomicOrdering.h" 82 #include "llvm/Support/Casting.h" 83 #include "llvm/Support/Compiler.h" 84 #include "llvm/Support/ConvertUTF.h" 85 #include "llvm/Support/ErrorHandling.h" 86 #include "llvm/Support/Format.h" 87 #include "llvm/Support/Locale.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/SaveAndRestore.h" 90 #include "llvm/Support/raw_ostream.h" 91 #include <algorithm> 92 #include <bitset> 93 #include <cassert> 94 #include <cctype> 95 #include <cstddef> 96 #include <cstdint> 97 #include <functional> 98 #include <limits> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 109 Context.getTargetInfo()); 110 } 111 112 /// Checks that a call expression's argument count is the desired number. 113 /// This is useful when doing custom type-checking. Returns true on error. 114 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 115 unsigned argCount = call->getNumArgs(); 116 if (argCount == desiredArgCount) return false; 117 118 if (argCount < desiredArgCount) 119 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 120 << 0 /*function call*/ << desiredArgCount << argCount 121 << call->getSourceRange(); 122 123 // Highlight all the excess arguments. 124 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 125 call->getArg(argCount - 1)->getEndLoc()); 126 127 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 128 << 0 /*function call*/ << desiredArgCount << argCount 129 << call->getArg(1)->getSourceRange(); 130 } 131 132 /// Check that the first argument to __builtin_annotation is an integer 133 /// and the second argument is a non-wide string literal. 134 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 135 if (checkArgCount(S, TheCall, 2)) 136 return true; 137 138 // First argument should be an integer. 139 Expr *ValArg = TheCall->getArg(0); 140 QualType Ty = ValArg->getType(); 141 if (!Ty->isIntegerType()) { 142 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 143 << ValArg->getSourceRange(); 144 return true; 145 } 146 147 // Second argument should be a constant string. 
148 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 149 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 150 if (!Literal || !Literal->isAscii()) { 151 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 152 << StrArg->getSourceRange(); 153 return true; 154 } 155 156 TheCall->setType(Ty); 157 return false; 158 } 159 160 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 161 // We need at least one argument. 162 if (TheCall->getNumArgs() < 1) { 163 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 164 << 0 << 1 << TheCall->getNumArgs() 165 << TheCall->getCallee()->getSourceRange(); 166 return true; 167 } 168 169 // All arguments should be wide string literals. 170 for (Expr *Arg : TheCall->arguments()) { 171 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 172 if (!Literal || !Literal->isWide()) { 173 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 174 << Arg->getSourceRange(); 175 return true; 176 } 177 } 178 179 return false; 180 } 181 182 /// Check that the argument to __builtin_addressof is a glvalue, and set the 183 /// result type to the corresponding pointer type. 184 static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 185 if (checkArgCount(S, TheCall, 1)) 186 return true; 187 188 ExprResult Arg(TheCall->getArg(0)); 189 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 190 if (ResultType.isNull()) 191 return true; 192 193 TheCall->setArg(0, Arg.get()); 194 TheCall->setType(ResultType); 195 return false; 196 } 197 198 /// Check that the argument to __builtin_function_start is a function. 199 static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) { 200 if (checkArgCount(S, TheCall, 1)) 201 return true; 202 203 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 204 if (Arg.isInvalid()) 205 return true; 206 207 TheCall->setArg(0, Arg.get()); 208 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>( 209 Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext())); 210 211 if (!FD) { 212 S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type) 213 << TheCall->getSourceRange(); 214 return true; 215 } 216 217 return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, 218 TheCall->getBeginLoc()); 219 } 220 221 /// Check the number of arguments and set the result type to 222 /// the argument type. 223 static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) { 224 if (checkArgCount(S, TheCall, 1)) 225 return true; 226 227 TheCall->setType(TheCall->getArg(0)->getType()); 228 return false; 229 } 230 231 /// Check that the value argument for __builtin_is_aligned(value, alignment) and 232 /// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer 233 /// type (but not a function pointer) and that the alignment is a power-of-two. 234 static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) { 235 if (checkArgCount(S, TheCall, 2)) 236 return true; 237 238 clang::Expr *Source = TheCall->getArg(0); 239 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned; 240 241 auto IsValidIntegerType = [](QualType Ty) { 242 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType(); 243 }; 244 QualType SrcTy = Source->getType(); 245 // We should also be able to use it with arrays (but not functions!). 
246 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) { 247 SrcTy = S.Context.getDecayedType(SrcTy); 248 } 249 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) || 250 SrcTy->isFunctionPointerType()) { 251 // FIXME: this is not quite the right error message since we don't allow 252 // floating point types, or member pointers. 253 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand) 254 << SrcTy; 255 return true; 256 } 257 258 clang::Expr *AlignOp = TheCall->getArg(1); 259 if (!IsValidIntegerType(AlignOp->getType())) { 260 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int) 261 << AlignOp->getType(); 262 return true; 263 } 264 Expr::EvalResult AlignResult; 265 unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1; 266 // We can't check validity of alignment if it is value dependent. 267 if (!AlignOp->isValueDependent() && 268 AlignOp->EvaluateAsInt(AlignResult, S.Context, 269 Expr::SE_AllowSideEffects)) { 270 llvm::APSInt AlignValue = AlignResult.Val.getInt(); 271 llvm::APSInt MaxValue( 272 llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits)); 273 if (AlignValue < 1) { 274 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1; 275 return true; 276 } 277 if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) { 278 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big) 279 << toString(MaxValue, 10); 280 return true; 281 } 282 if (!AlignValue.isPowerOf2()) { 283 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two); 284 return true; 285 } 286 if (AlignValue == 1) { 287 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless) 288 << IsBooleanAlignBuiltin; 289 } 290 } 291 292 ExprResult SrcArg = S.PerformCopyInitialization( 293 InitializedEntity::InitializeParameter(S.Context, SrcTy, false), 294 SourceLocation(), Source); 295 if (SrcArg.isInvalid()) 296 return true; 297 TheCall->setArg(0, SrcArg.get()); 298 ExprResult AlignArg = 299 S.PerformCopyInitialization(InitializedEntity::InitializeParameter( 300 S.Context, AlignOp->getType(), false), 301 SourceLocation(), AlignOp); 302 if (AlignArg.isInvalid()) 303 return true; 304 TheCall->setArg(1, AlignArg.get()); 305 // For align_up/align_down, the return type is the same as the (potentially 306 // decayed) argument type including qualifiers. For is_aligned(), the result 307 // is always bool. 308 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy); 309 return false; 310 } 311 312 static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall, 313 unsigned BuiltinID) { 314 if (checkArgCount(S, TheCall, 3)) 315 return true; 316 317 // First two arguments should be integers. 318 for (unsigned I = 0; I < 2; ++I) { 319 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I)); 320 if (Arg.isInvalid()) return true; 321 TheCall->setArg(I, Arg.get()); 322 323 QualType Ty = Arg.get()->getType(); 324 if (!Ty->isIntegerType()) { 325 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int) 326 << Ty << Arg.get()->getSourceRange(); 327 return true; 328 } 329 } 330 331 // Third argument should be a pointer to a non-const integer. 332 // IRGen correctly handles volatile, restrict, and address spaces, and 333 // the other qualifiers aren't possible. 
334 { 335 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2)); 336 if (Arg.isInvalid()) return true; 337 TheCall->setArg(2, Arg.get()); 338 339 QualType Ty = Arg.get()->getType(); 340 const auto *PtrTy = Ty->getAs<PointerType>(); 341 if (!PtrTy || 342 !PtrTy->getPointeeType()->isIntegerType() || 343 PtrTy->getPointeeType().isConstQualified()) { 344 S.Diag(Arg.get()->getBeginLoc(), 345 diag::err_overflow_builtin_must_be_ptr_int) 346 << Ty << Arg.get()->getSourceRange(); 347 return true; 348 } 349 } 350 351 // Disallow signed bit-precise integer args larger than 128 bits to mul 352 // function until we improve backend support. 353 if (BuiltinID == Builtin::BI__builtin_mul_overflow) { 354 for (unsigned I = 0; I < 3; ++I) { 355 const auto Arg = TheCall->getArg(I); 356 // Third argument will be a pointer. 357 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType(); 358 if (Ty->isBitIntType() && Ty->isSignedIntegerType() && 359 S.getASTContext().getIntWidth(Ty) > 128) 360 return S.Diag(Arg->getBeginLoc(), 361 diag::err_overflow_builtin_bit_int_max_size) 362 << 128; 363 } 364 } 365 366 return false; 367 } 368 369 static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) { 370 if (checkArgCount(S, BuiltinCall, 2)) 371 return true; 372 373 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc(); 374 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts(); 375 Expr *Call = BuiltinCall->getArg(0); 376 Expr *Chain = BuiltinCall->getArg(1); 377 378 if (Call->getStmtClass() != Stmt::CallExprClass) { 379 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call) 380 << Call->getSourceRange(); 381 return true; 382 } 383 384 auto CE = cast<CallExpr>(Call); 385 if (CE->getCallee()->getType()->isBlockPointerType()) { 386 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call) 387 << Call->getSourceRange(); 388 return true; 389 } 390 391 const Decl *TargetDecl = CE->getCalleeDecl(); 392 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 393 if (FD->getBuiltinID()) { 394 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call) 395 << Call->getSourceRange(); 396 return true; 397 } 398 399 if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) { 400 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call) 401 << Call->getSourceRange(); 402 return true; 403 } 404 405 ExprResult ChainResult = S.UsualUnaryConversions(Chain); 406 if (ChainResult.isInvalid()) 407 return true; 408 if (!ChainResult.get()->getType()->isPointerType()) { 409 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer) 410 << Chain->getSourceRange(); 411 return true; 412 } 413 414 QualType ReturnTy = CE->getCallReturnType(S.Context); 415 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; 416 QualType BuiltinTy = S.Context.getFunctionType( 417 ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); 418 QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); 419 420 Builtin = 421 S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get(); 422 423 BuiltinCall->setType(CE->getType()); 424 BuiltinCall->setValueKind(CE->getValueKind()); 425 BuiltinCall->setObjectKind(CE->getObjectKind()); 426 BuiltinCall->setCallee(Builtin); 427 BuiltinCall->setArg(1, ChainResult.get()); 428 429 return false; 430 } 431 432 namespace { 433 434 class ScanfDiagnosticFormatHandler 435 : public analyze_format_string::FormatStringHandler { 436 // Accepts the argument index (relative to the first destination index) of the 
437 // argument whose size we want. 438 using ComputeSizeFunction = 439 llvm::function_ref<Optional<llvm::APSInt>(unsigned)>; 440 441 // Accepts the argument index (relative to the first destination index), the 442 // destination size, and the source size). 443 using DiagnoseFunction = 444 llvm::function_ref<void(unsigned, unsigned, unsigned)>; 445 446 ComputeSizeFunction ComputeSizeArgument; 447 DiagnoseFunction Diagnose; 448 449 public: 450 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument, 451 DiagnoseFunction Diagnose) 452 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {} 453 454 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 455 const char *StartSpecifier, 456 unsigned specifierLen) override { 457 if (!FS.consumesDataArgument()) 458 return true; 459 460 unsigned NulByte = 0; 461 switch ((FS.getConversionSpecifier().getKind())) { 462 default: 463 return true; 464 case analyze_format_string::ConversionSpecifier::sArg: 465 case analyze_format_string::ConversionSpecifier::ScanListArg: 466 NulByte = 1; 467 break; 468 case analyze_format_string::ConversionSpecifier::cArg: 469 break; 470 } 471 472 analyze_format_string::OptionalAmount FW = FS.getFieldWidth(); 473 if (FW.getHowSpecified() != 474 analyze_format_string::OptionalAmount::HowSpecified::Constant) 475 return true; 476 477 unsigned SourceSize = FW.getConstantAmount() + NulByte; 478 479 Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex()); 480 if (!DestSizeAPS) 481 return true; 482 483 unsigned DestSize = DestSizeAPS->getZExtValue(); 484 485 if (DestSize < SourceSize) 486 Diagnose(FS.getArgIndex(), DestSize, SourceSize); 487 488 return true; 489 } 490 }; 491 492 class EstimateSizeFormatHandler 493 : public analyze_format_string::FormatStringHandler { 494 size_t Size; 495 496 public: 497 EstimateSizeFormatHandler(StringRef Format) 498 : Size(std::min(Format.find(0), Format.size()) + 499 1 /* null byte always written by sprintf */) {} 500 501 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 502 const char *, unsigned SpecifierLen, 503 const TargetInfo &) override { 504 505 const size_t FieldWidth = computeFieldWidth(FS); 506 const size_t Precision = computePrecision(FS); 507 508 // The actual format. 509 switch (FS.getConversionSpecifier().getKind()) { 510 // Just a char. 511 case analyze_format_string::ConversionSpecifier::cArg: 512 case analyze_format_string::ConversionSpecifier::CArg: 513 Size += std::max(FieldWidth, (size_t)1); 514 break; 515 // Just an integer. 516 case analyze_format_string::ConversionSpecifier::dArg: 517 case analyze_format_string::ConversionSpecifier::DArg: 518 case analyze_format_string::ConversionSpecifier::iArg: 519 case analyze_format_string::ConversionSpecifier::oArg: 520 case analyze_format_string::ConversionSpecifier::OArg: 521 case analyze_format_string::ConversionSpecifier::uArg: 522 case analyze_format_string::ConversionSpecifier::UArg: 523 case analyze_format_string::ConversionSpecifier::xArg: 524 case analyze_format_string::ConversionSpecifier::XArg: 525 Size += std::max(FieldWidth, Precision); 526 break; 527 528 // %g style conversion switches between %f or %e style dynamically. 529 // %f always takes less space, so default to it. 530 case analyze_format_string::ConversionSpecifier::gArg: 531 case analyze_format_string::ConversionSpecifier::GArg: 532 533 // Floating point number in the form '[+]ddd.ddd'. 
534 case analyze_format_string::ConversionSpecifier::fArg: 535 case analyze_format_string::ConversionSpecifier::FArg: 536 Size += std::max(FieldWidth, 1 /* integer part */ + 537 (Precision ? 1 + Precision 538 : 0) /* period + decimal */); 539 break; 540 541 // Floating point number in the form '[-]d.ddde[+-]dd'. 542 case analyze_format_string::ConversionSpecifier::eArg: 543 case analyze_format_string::ConversionSpecifier::EArg: 544 Size += 545 std::max(FieldWidth, 546 1 /* integer part */ + 547 (Precision ? 1 + Precision : 0) /* period + decimal */ + 548 1 /* e or E letter */ + 2 /* exponent */); 549 break; 550 551 // Floating point number in the form '[-]0xh.hhhhp±dd'. 552 case analyze_format_string::ConversionSpecifier::aArg: 553 case analyze_format_string::ConversionSpecifier::AArg: 554 Size += 555 std::max(FieldWidth, 556 2 /* 0x */ + 1 /* integer part */ + 557 (Precision ? 1 + Precision : 0) /* period + decimal */ + 558 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */); 559 break; 560 561 // Just a string. 562 case analyze_format_string::ConversionSpecifier::sArg: 563 case analyze_format_string::ConversionSpecifier::SArg: 564 Size += FieldWidth; 565 break; 566 567 // Just a pointer in the form '0xddd'. 568 case analyze_format_string::ConversionSpecifier::pArg: 569 Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision); 570 break; 571 572 // A plain percent. 573 case analyze_format_string::ConversionSpecifier::PercentArg: 574 Size += 1; 575 break; 576 577 default: 578 break; 579 } 580 581 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix(); 582 583 if (FS.hasAlternativeForm()) { 584 switch (FS.getConversionSpecifier().getKind()) { 585 default: 586 break; 587 // Force a leading '0'. 588 case analyze_format_string::ConversionSpecifier::oArg: 589 Size += 1; 590 break; 591 // Force a leading '0x'. 592 case analyze_format_string::ConversionSpecifier::xArg: 593 case analyze_format_string::ConversionSpecifier::XArg: 594 Size += 2; 595 break; 596 // Force a period '.' before decimal, even if precision is 0. 597 case analyze_format_string::ConversionSpecifier::aArg: 598 case analyze_format_string::ConversionSpecifier::AArg: 599 case analyze_format_string::ConversionSpecifier::eArg: 600 case analyze_format_string::ConversionSpecifier::EArg: 601 case analyze_format_string::ConversionSpecifier::fArg: 602 case analyze_format_string::ConversionSpecifier::FArg: 603 case analyze_format_string::ConversionSpecifier::gArg: 604 case analyze_format_string::ConversionSpecifier::GArg: 605 Size += (Precision ? 0 : 1); 606 break; 607 } 608 } 609 assert(SpecifierLen <= Size && "no underflow"); 610 Size -= SpecifierLen; 611 return true; 612 } 613 614 size_t getSizeLowerBound() const { return Size; } 615 616 private: 617 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) { 618 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth(); 619 size_t FieldWidth = 0; 620 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant) 621 FieldWidth = FW.getConstantAmount(); 622 return FieldWidth; 623 } 624 625 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) { 626 const analyze_format_string::OptionalAmount &FW = FS.getPrecision(); 627 size_t Precision = 0; 628 629 // See man 3 printf for default precision value based on the specifier. 
630 switch (FW.getHowSpecified()) { 631 case analyze_format_string::OptionalAmount::NotSpecified: 632 switch (FS.getConversionSpecifier().getKind()) { 633 default: 634 break; 635 case analyze_format_string::ConversionSpecifier::dArg: // %d 636 case analyze_format_string::ConversionSpecifier::DArg: // %D 637 case analyze_format_string::ConversionSpecifier::iArg: // %i 638 Precision = 1; 639 break; 640 case analyze_format_string::ConversionSpecifier::oArg: // %d 641 case analyze_format_string::ConversionSpecifier::OArg: // %D 642 case analyze_format_string::ConversionSpecifier::uArg: // %d 643 case analyze_format_string::ConversionSpecifier::UArg: // %D 644 case analyze_format_string::ConversionSpecifier::xArg: // %d 645 case analyze_format_string::ConversionSpecifier::XArg: // %D 646 Precision = 1; 647 break; 648 case analyze_format_string::ConversionSpecifier::fArg: // %f 649 case analyze_format_string::ConversionSpecifier::FArg: // %F 650 case analyze_format_string::ConversionSpecifier::eArg: // %e 651 case analyze_format_string::ConversionSpecifier::EArg: // %E 652 case analyze_format_string::ConversionSpecifier::gArg: // %g 653 case analyze_format_string::ConversionSpecifier::GArg: // %G 654 Precision = 6; 655 break; 656 case analyze_format_string::ConversionSpecifier::pArg: // %d 657 Precision = 1; 658 break; 659 } 660 break; 661 case analyze_format_string::OptionalAmount::Constant: 662 Precision = FW.getConstantAmount(); 663 break; 664 default: 665 break; 666 } 667 return Precision; 668 } 669 }; 670 671 } // namespace 672 673 void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, 674 CallExpr *TheCall) { 675 if (TheCall->isValueDependent() || TheCall->isTypeDependent() || 676 isConstantEvaluated()) 677 return; 678 679 bool UseDABAttr = false; 680 const FunctionDecl *UseDecl = FD; 681 682 const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>(); 683 if (DABAttr) { 684 UseDecl = DABAttr->getFunction(); 685 assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!"); 686 UseDABAttr = true; 687 } 688 689 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true); 690 691 if (!BuiltinID) 692 return; 693 694 const TargetInfo &TI = getASTContext().getTargetInfo(); 695 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType()); 696 697 auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> { 698 // If we refer to a diagnose_as_builtin attribute, we need to change the 699 // argument index to refer to the arguments of the called function. Unless 700 // the index is out of bounds, which presumably means it's a variadic 701 // function. 702 if (!UseDABAttr) 703 return Index; 704 unsigned DABIndices = DABAttr->argIndices_size(); 705 unsigned NewIndex = Index < DABIndices 706 ? 
DABAttr->argIndices_begin()[Index] 707 : Index - DABIndices + FD->getNumParams(); 708 if (NewIndex >= TheCall->getNumArgs()) 709 return llvm::None; 710 return NewIndex; 711 }; 712 713 auto ComputeExplicitObjectSizeArgument = 714 [&](unsigned Index) -> Optional<llvm::APSInt> { 715 Optional<unsigned> IndexOptional = TranslateIndex(Index); 716 if (!IndexOptional) 717 return llvm::None; 718 unsigned NewIndex = IndexOptional.getValue(); 719 Expr::EvalResult Result; 720 Expr *SizeArg = TheCall->getArg(NewIndex); 721 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 722 return llvm::None; 723 llvm::APSInt Integer = Result.Val.getInt(); 724 Integer.setIsUnsigned(true); 725 return Integer; 726 }; 727 728 auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 729 // If the parameter has a pass_object_size attribute, then we should use its 730 // (potentially) more strict checking mode. Otherwise, conservatively assume 731 // type 0. 732 int BOSType = 0; 733 // This check can fail for variadic functions. 734 if (Index < FD->getNumParams()) { 735 if (const auto *POS = 736 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>()) 737 BOSType = POS->getType(); 738 } 739 740 Optional<unsigned> IndexOptional = TranslateIndex(Index); 741 if (!IndexOptional) 742 return llvm::None; 743 unsigned NewIndex = IndexOptional.getValue(); 744 745 const Expr *ObjArg = TheCall->getArg(NewIndex); 746 uint64_t Result; 747 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 748 return llvm::None; 749 750 // Get the object size in the target's size_t width. 751 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 752 }; 753 754 auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> { 755 Optional<unsigned> IndexOptional = TranslateIndex(Index); 756 if (!IndexOptional) 757 return llvm::None; 758 unsigned NewIndex = IndexOptional.getValue(); 759 760 const Expr *ObjArg = TheCall->getArg(NewIndex); 761 uint64_t Result; 762 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext())) 763 return llvm::None; 764 // Add 1 for null byte. 765 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth); 766 }; 767 768 Optional<llvm::APSInt> SourceSize; 769 Optional<llvm::APSInt> DestinationSize; 770 unsigned DiagID = 0; 771 bool IsChkVariant = false; 772 773 auto GetFunctionName = [&]() { 774 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 775 // Skim off the details of whichever builtin was called to produce a better 776 // diagnostic, as it's unlikely that the user wrote the __builtin 777 // explicitly. 
778 if (IsChkVariant) { 779 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 780 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 781 } else if (FunctionName.startswith("__builtin_")) { 782 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 783 } 784 return FunctionName; 785 }; 786 787 switch (BuiltinID) { 788 default: 789 return; 790 case Builtin::BI__builtin_strcpy: 791 case Builtin::BIstrcpy: { 792 DiagID = diag::warn_fortify_strlen_overflow; 793 SourceSize = ComputeStrLenArgument(1); 794 DestinationSize = ComputeSizeArgument(0); 795 break; 796 } 797 798 case Builtin::BI__builtin___strcpy_chk: { 799 DiagID = diag::warn_fortify_strlen_overflow; 800 SourceSize = ComputeStrLenArgument(1); 801 DestinationSize = ComputeExplicitObjectSizeArgument(2); 802 IsChkVariant = true; 803 break; 804 } 805 806 case Builtin::BIscanf: 807 case Builtin::BIfscanf: 808 case Builtin::BIsscanf: { 809 unsigned FormatIndex = 1; 810 unsigned DataIndex = 2; 811 if (BuiltinID == Builtin::BIscanf) { 812 FormatIndex = 0; 813 DataIndex = 1; 814 } 815 816 const auto *FormatExpr = 817 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 818 819 const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 820 if (!Format) 821 return; 822 823 if (!Format->isAscii() && !Format->isUTF8()) 824 return; 825 826 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize, 827 unsigned SourceSize) { 828 DiagID = diag::warn_fortify_scanf_overflow; 829 unsigned Index = ArgIndex + DataIndex; 830 StringRef FunctionName = GetFunctionName(); 831 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall, 832 PDiag(DiagID) << FunctionName << (Index + 1) 833 << DestSize << SourceSize); 834 }; 835 836 StringRef FormatStrRef = Format->getString(); 837 auto ShiftedComputeSizeArgument = [&](unsigned Index) { 838 return ComputeSizeArgument(Index + DataIndex); 839 }; 840 ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose); 841 const char *FormatBytes = FormatStrRef.data(); 842 const ConstantArrayType *T = 843 Context.getAsConstantArrayType(Format->getType()); 844 assert(T && "String literal not of constant array type!"); 845 size_t TypeSize = T->getSize().getZExtValue(); 846 847 // In case there's a null byte somewhere. 848 size_t StrLen = 849 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 850 851 analyze_format_string::ParseScanfString(H, FormatBytes, 852 FormatBytes + StrLen, getLangOpts(), 853 Context.getTargetInfo()); 854 855 // Unlike the other cases, in this one we have already issued the diagnostic 856 // here, so no need to continue (because unlike the other cases, here the 857 // diagnostic refers to the argument number). 858 return; 859 } 860 861 case Builtin::BIsprintf: 862 case Builtin::BI__builtin___sprintf_chk: { 863 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3; 864 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 865 866 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 867 868 if (!Format->isAscii() && !Format->isUTF8()) 869 return; 870 871 StringRef FormatStrRef = Format->getString(); 872 EstimateSizeFormatHandler H(FormatStrRef); 873 const char *FormatBytes = FormatStrRef.data(); 874 const ConstantArrayType *T = 875 Context.getAsConstantArrayType(Format->getType()); 876 assert(T && "String literal not of constant array type!"); 877 size_t TypeSize = T->getSize().getZExtValue(); 878 879 // In case there's a null byte somewhere. 
880 size_t StrLen = 881 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 882 if (!analyze_format_string::ParsePrintfString( 883 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 884 Context.getTargetInfo(), false)) { 885 DiagID = diag::warn_fortify_source_format_overflow; 886 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 887 .extOrTrunc(SizeTypeWidth); 888 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 889 DestinationSize = ComputeExplicitObjectSizeArgument(2); 890 IsChkVariant = true; 891 } else { 892 DestinationSize = ComputeSizeArgument(0); 893 } 894 break; 895 } 896 } 897 return; 898 } 899 case Builtin::BI__builtin___memcpy_chk: 900 case Builtin::BI__builtin___memmove_chk: 901 case Builtin::BI__builtin___memset_chk: 902 case Builtin::BI__builtin___strlcat_chk: 903 case Builtin::BI__builtin___strlcpy_chk: 904 case Builtin::BI__builtin___strncat_chk: 905 case Builtin::BI__builtin___strncpy_chk: 906 case Builtin::BI__builtin___stpncpy_chk: 907 case Builtin::BI__builtin___memccpy_chk: 908 case Builtin::BI__builtin___mempcpy_chk: { 909 DiagID = diag::warn_builtin_chk_overflow; 910 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 911 DestinationSize = 912 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 913 IsChkVariant = true; 914 break; 915 } 916 917 case Builtin::BI__builtin___snprintf_chk: 918 case Builtin::BI__builtin___vsnprintf_chk: { 919 DiagID = diag::warn_builtin_chk_overflow; 920 SourceSize = ComputeExplicitObjectSizeArgument(1); 921 DestinationSize = ComputeExplicitObjectSizeArgument(3); 922 IsChkVariant = true; 923 break; 924 } 925 926 case Builtin::BIstrncat: 927 case Builtin::BI__builtin_strncat: 928 case Builtin::BIstrncpy: 929 case Builtin::BI__builtin_strncpy: 930 case Builtin::BIstpncpy: 931 case Builtin::BI__builtin_stpncpy: { 932 // Whether these functions overflow depends on the runtime strlen of the 933 // string, not just the buffer size, so emitting the "always overflow" 934 // diagnostic isn't quite right. We should still diagnose passing a buffer 935 // size larger than the destination buffer though; this is a runtime abort 936 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 
937 DiagID = diag::warn_fortify_source_size_mismatch; 938 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 939 DestinationSize = ComputeSizeArgument(0); 940 break; 941 } 942 943 case Builtin::BImemcpy: 944 case Builtin::BI__builtin_memcpy: 945 case Builtin::BImemmove: 946 case Builtin::BI__builtin_memmove: 947 case Builtin::BImemset: 948 case Builtin::BI__builtin_memset: 949 case Builtin::BImempcpy: 950 case Builtin::BI__builtin_mempcpy: { 951 DiagID = diag::warn_fortify_source_overflow; 952 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 953 DestinationSize = ComputeSizeArgument(0); 954 break; 955 } 956 case Builtin::BIsnprintf: 957 case Builtin::BI__builtin_snprintf: 958 case Builtin::BIvsnprintf: 959 case Builtin::BI__builtin_vsnprintf: { 960 DiagID = diag::warn_fortify_source_size_mismatch; 961 SourceSize = ComputeExplicitObjectSizeArgument(1); 962 DestinationSize = ComputeSizeArgument(0); 963 break; 964 } 965 } 966 967 if (!SourceSize || !DestinationSize || 968 llvm::APSInt::compareValues(SourceSize.getValue(), 969 DestinationSize.getValue()) <= 0) 970 return; 971 972 StringRef FunctionName = GetFunctionName(); 973 974 SmallString<16> DestinationStr; 975 SmallString<16> SourceStr; 976 DestinationSize->toString(DestinationStr, /*Radix=*/10); 977 SourceSize->toString(SourceStr, /*Radix=*/10); 978 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 979 PDiag(DiagID) 980 << FunctionName << DestinationStr << SourceStr); 981 } 982 983 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 984 Scope::ScopeFlags NeededScopeFlags, 985 unsigned DiagID) { 986 // Scopes aren't available during instantiation. Fortunately, builtin 987 // functions cannot be template args so they cannot be formed through template 988 // instantiation. Therefore checking once during the parse is sufficient. 989 if (SemaRef.inTemplateInstantiation()) 990 return false; 991 992 Scope *S = SemaRef.getCurScope(); 993 while (S && !S->isSEHExceptScope()) 994 S = S->getParent(); 995 if (!S || !(S->getFlags() & NeededScopeFlags)) { 996 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 997 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 998 << DRE->getDecl()->getIdentifier(); 999 return true; 1000 } 1001 1002 return false; 1003 } 1004 1005 static inline bool isBlockPointer(Expr *Arg) { 1006 return Arg->getType()->isBlockPointerType(); 1007 } 1008 1009 /// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 1010 /// void*, which is a requirement of device side enqueue. 1011 static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 1012 const BlockPointerType *BPT = 1013 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1014 ArrayRef<QualType> Params = 1015 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 1016 unsigned ArgCounter = 0; 1017 bool IllegalParams = false; 1018 // Iterate through the block parameters until either one is found that is not 1019 // a local void*, or the block is valid. 1020 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 1021 I != E; ++I, ++ArgCounter) { 1022 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 1023 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 1024 LangAS::opencl_local) { 1025 // Get the location of the error. If a block literal has been passed 1026 // (BlockExpr) then we can point straight to the offending argument, 1027 // else we just point to the variable reference. 
1028 SourceLocation ErrorLoc; 1029 if (isa<BlockExpr>(BlockArg)) { 1030 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 1031 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 1032 } else if (isa<DeclRefExpr>(BlockArg)) { 1033 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 1034 } 1035 S.Diag(ErrorLoc, 1036 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 1037 IllegalParams = true; 1038 } 1039 } 1040 1041 return IllegalParams; 1042 } 1043 1044 static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 1045 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) { 1046 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 1047 << 1 << Call->getDirectCallee() << "cl_khr_subgroups"; 1048 return true; 1049 } 1050 return false; 1051 } 1052 1053 static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 1054 if (checkArgCount(S, TheCall, 2)) 1055 return true; 1056 1057 if (checkOpenCLSubgroupExt(S, TheCall)) 1058 return true; 1059 1060 // First argument is an ndrange_t type. 1061 Expr *NDRangeArg = TheCall->getArg(0); 1062 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1063 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1064 << TheCall->getDirectCallee() << "'ndrange_t'"; 1065 return true; 1066 } 1067 1068 Expr *BlockArg = TheCall->getArg(1); 1069 if (!isBlockPointer(BlockArg)) { 1070 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1071 << TheCall->getDirectCallee() << "block"; 1072 return true; 1073 } 1074 return checkOpenCLBlockArgs(S, BlockArg); 1075 } 1076 1077 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 1078 /// get_kernel_work_group_size 1079 /// and get_kernel_preferred_work_group_size_multiple builtin functions. 1080 static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 1081 if (checkArgCount(S, TheCall, 1)) 1082 return true; 1083 1084 Expr *BlockArg = TheCall->getArg(0); 1085 if (!isBlockPointer(BlockArg)) { 1086 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1087 << TheCall->getDirectCallee() << "block"; 1088 return true; 1089 } 1090 return checkOpenCLBlockArgs(S, BlockArg); 1091 } 1092 1093 /// Diagnose integer type and any valid implicit conversion to it. 1094 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 1095 const QualType &IntType); 1096 1097 static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 1098 unsigned Start, unsigned End) { 1099 bool IllegalParams = false; 1100 for (unsigned I = Start; I <= End; ++I) 1101 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 1102 S.Context.getSizeType()); 1103 return IllegalParams; 1104 } 1105 1106 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 1107 /// 'local void*' parameter of passed block. 1108 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 1109 Expr *BlockArg, 1110 unsigned NumNonVarArgs) { 1111 const BlockPointerType *BPT = 1112 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1113 unsigned NumBlockParams = 1114 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 1115 unsigned TotalNumArgs = TheCall->getNumArgs(); 1116 1117 // For each argument passed to the block, a corresponding uint needs to 1118 // be passed to describe the size of the local memory. 
1119 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 1120 S.Diag(TheCall->getBeginLoc(), 1121 diag::err_opencl_enqueue_kernel_local_size_args); 1122 return true; 1123 } 1124 1125 // Check that the sizes of the local memory are specified by integers. 1126 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 1127 TotalNumArgs - 1); 1128 } 1129 1130 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 1131 /// overload formats specified in Table 6.13.17.1. 1132 /// int enqueue_kernel(queue_t queue, 1133 /// kernel_enqueue_flags_t flags, 1134 /// const ndrange_t ndrange, 1135 /// void (^block)(void)) 1136 /// int enqueue_kernel(queue_t queue, 1137 /// kernel_enqueue_flags_t flags, 1138 /// const ndrange_t ndrange, 1139 /// uint num_events_in_wait_list, 1140 /// clk_event_t *event_wait_list, 1141 /// clk_event_t *event_ret, 1142 /// void (^block)(void)) 1143 /// int enqueue_kernel(queue_t queue, 1144 /// kernel_enqueue_flags_t flags, 1145 /// const ndrange_t ndrange, 1146 /// void (^block)(local void*, ...), 1147 /// uint size0, ...) 1148 /// int enqueue_kernel(queue_t queue, 1149 /// kernel_enqueue_flags_t flags, 1150 /// const ndrange_t ndrange, 1151 /// uint num_events_in_wait_list, 1152 /// clk_event_t *event_wait_list, 1153 /// clk_event_t *event_ret, 1154 /// void (^block)(local void*, ...), 1155 /// uint size0, ...) 1156 static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) { 1157 unsigned NumArgs = TheCall->getNumArgs(); 1158 1159 if (NumArgs < 4) { 1160 S.Diag(TheCall->getBeginLoc(), 1161 diag::err_typecheck_call_too_few_args_at_least) 1162 << 0 << 4 << NumArgs; 1163 return true; 1164 } 1165 1166 Expr *Arg0 = TheCall->getArg(0); 1167 Expr *Arg1 = TheCall->getArg(1); 1168 Expr *Arg2 = TheCall->getArg(2); 1169 Expr *Arg3 = TheCall->getArg(3); 1170 1171 // First argument always needs to be a queue_t type. 1172 if (!Arg0->getType()->isQueueT()) { 1173 S.Diag(TheCall->getArg(0)->getBeginLoc(), 1174 diag::err_opencl_builtin_expected_type) 1175 << TheCall->getDirectCallee() << S.Context.OCLQueueTy; 1176 return true; 1177 } 1178 1179 // Second argument always needs to be a kernel_enqueue_flags_t enum value. 1180 if (!Arg1->getType()->isIntegerType()) { 1181 S.Diag(TheCall->getArg(1)->getBeginLoc(), 1182 diag::err_opencl_builtin_expected_type) 1183 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)"; 1184 return true; 1185 } 1186 1187 // Third argument is always an ndrange_t type. 1188 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1189 S.Diag(TheCall->getArg(2)->getBeginLoc(), 1190 diag::err_opencl_builtin_expected_type) 1191 << TheCall->getDirectCallee() << "'ndrange_t'"; 1192 return true; 1193 } 1194 1195 // With four arguments, there is only one form that the function could be 1196 // called in: no events and no variable arguments. 1197 if (NumArgs == 4) { 1198 // check that the last argument is the right block type. 
1199 if (!isBlockPointer(Arg3)) { 1200 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1201 << TheCall->getDirectCallee() << "block"; 1202 return true; 1203 } 1204 // we have a block type, check the prototype 1205 const BlockPointerType *BPT = 1206 cast<BlockPointerType>(Arg3->getType().getCanonicalType()); 1207 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) { 1208 S.Diag(Arg3->getBeginLoc(), 1209 diag::err_opencl_enqueue_kernel_blocks_no_args); 1210 return true; 1211 } 1212 return false; 1213 } 1214 // we can have block + varargs. 1215 if (isBlockPointer(Arg3)) 1216 return (checkOpenCLBlockArgs(S, Arg3) || 1217 checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4)); 1218 // last two cases with either exactly 7 args or 7 args and varargs. 1219 if (NumArgs >= 7) { 1220 // check common block argument. 1221 Expr *Arg6 = TheCall->getArg(6); 1222 if (!isBlockPointer(Arg6)) { 1223 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1224 << TheCall->getDirectCallee() << "block"; 1225 return true; 1226 } 1227 if (checkOpenCLBlockArgs(S, Arg6)) 1228 return true; 1229 1230 // Forth argument has to be any integer type. 1231 if (!Arg3->getType()->isIntegerType()) { 1232 S.Diag(TheCall->getArg(3)->getBeginLoc(), 1233 diag::err_opencl_builtin_expected_type) 1234 << TheCall->getDirectCallee() << "integer"; 1235 return true; 1236 } 1237 // check remaining common arguments. 1238 Expr *Arg4 = TheCall->getArg(4); 1239 Expr *Arg5 = TheCall->getArg(5); 1240 1241 // Fifth argument is always passed as a pointer to clk_event_t. 1242 if (!Arg4->isNullPointerConstant(S.Context, 1243 Expr::NPC_ValueDependentIsNotNull) && 1244 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) { 1245 S.Diag(TheCall->getArg(4)->getBeginLoc(), 1246 diag::err_opencl_builtin_expected_type) 1247 << TheCall->getDirectCallee() 1248 << S.Context.getPointerType(S.Context.OCLClkEventTy); 1249 return true; 1250 } 1251 1252 // Sixth argument is always passed as a pointer to clk_event_t. 1253 if (!Arg5->isNullPointerConstant(S.Context, 1254 Expr::NPC_ValueDependentIsNotNull) && 1255 !(Arg5->getType()->isPointerType() && 1256 Arg5->getType()->getPointeeType()->isClkEventT())) { 1257 S.Diag(TheCall->getArg(5)->getBeginLoc(), 1258 diag::err_opencl_builtin_expected_type) 1259 << TheCall->getDirectCallee() 1260 << S.Context.getPointerType(S.Context.OCLClkEventTy); 1261 return true; 1262 } 1263 1264 if (NumArgs == 7) 1265 return false; 1266 1267 return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7); 1268 } 1269 1270 // None of the specific case has been detected, give generic error 1271 S.Diag(TheCall->getBeginLoc(), 1272 diag::err_opencl_enqueue_kernel_incorrect_args); 1273 return true; 1274 } 1275 1276 /// Returns OpenCL access qual. 1277 static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) { 1278 return D->getAttr<OpenCLAccessAttr>(); 1279 } 1280 1281 /// Returns true if pipe element type is different from the pointer. 1282 static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) { 1283 const Expr *Arg0 = Call->getArg(0); 1284 // First argument type should always be pipe. 1285 if (!Arg0->getType()->isPipeType()) { 1286 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1287 << Call->getDirectCallee() << Arg0->getSourceRange(); 1288 return true; 1289 } 1290 OpenCLAccessAttr *AccessQual = 1291 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl()); 1292 // Validates the access qualifier is compatible with the call. 
1293 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be 1294 // read_only and write_only, and assumed to be read_only if no qualifier is 1295 // specified. 1296 switch (Call->getDirectCallee()->getBuiltinID()) { 1297 case Builtin::BIread_pipe: 1298 case Builtin::BIreserve_read_pipe: 1299 case Builtin::BIcommit_read_pipe: 1300 case Builtin::BIwork_group_reserve_read_pipe: 1301 case Builtin::BIsub_group_reserve_read_pipe: 1302 case Builtin::BIwork_group_commit_read_pipe: 1303 case Builtin::BIsub_group_commit_read_pipe: 1304 if (!(!AccessQual || AccessQual->isReadOnly())) { 1305 S.Diag(Arg0->getBeginLoc(), 1306 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1307 << "read_only" << Arg0->getSourceRange(); 1308 return true; 1309 } 1310 break; 1311 case Builtin::BIwrite_pipe: 1312 case Builtin::BIreserve_write_pipe: 1313 case Builtin::BIcommit_write_pipe: 1314 case Builtin::BIwork_group_reserve_write_pipe: 1315 case Builtin::BIsub_group_reserve_write_pipe: 1316 case Builtin::BIwork_group_commit_write_pipe: 1317 case Builtin::BIsub_group_commit_write_pipe: 1318 if (!(AccessQual && AccessQual->isWriteOnly())) { 1319 S.Diag(Arg0->getBeginLoc(), 1320 diag::err_opencl_builtin_pipe_invalid_access_modifier) 1321 << "write_only" << Arg0->getSourceRange(); 1322 return true; 1323 } 1324 break; 1325 default: 1326 break; 1327 } 1328 return false; 1329 } 1330 1331 /// Returns true if pipe element type is different from the pointer. 1332 static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) { 1333 const Expr *Arg0 = Call->getArg(0); 1334 const Expr *ArgIdx = Call->getArg(Idx); 1335 const PipeType *PipeTy = cast<PipeType>(Arg0->getType()); 1336 const QualType EltTy = PipeTy->getElementType(); 1337 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>(); 1338 // The Idx argument should be a pointer and the type of the pointer and 1339 // the type of pipe element should also be the same. 1340 if (!ArgTy || 1341 !S.Context.hasSameType( 1342 EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) { 1343 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1344 << Call->getDirectCallee() << S.Context.getPointerType(EltTy) 1345 << ArgIdx->getType() << ArgIdx->getSourceRange(); 1346 return true; 1347 } 1348 return false; 1349 } 1350 1351 // Performs semantic analysis for the read/write_pipe call. 1352 // \param S Reference to the semantic analyzer. 1353 // \param Call A pointer to the builtin call. 1354 // \return True if a semantic error has been found, false otherwise. 1355 static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) { 1356 // OpenCL v2.0 s6.13.16.2 - The built-in read/write 1357 // functions have two forms. 1358 switch (Call->getNumArgs()) { 1359 case 2: 1360 if (checkOpenCLPipeArg(S, Call)) 1361 return true; 1362 // The call with 2 arguments should be 1363 // read/write_pipe(pipe T, T*). 1364 // Check packet type T. 1365 if (checkOpenCLPipePacketType(S, Call, 1)) 1366 return true; 1367 break; 1368 1369 case 4: { 1370 if (checkOpenCLPipeArg(S, Call)) 1371 return true; 1372 // The call with 4 arguments should be 1373 // read/write_pipe(pipe T, reserve_id_t, uint, T*). 1374 // Check reserve_id_t. 1375 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1376 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1377 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1378 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1379 return true; 1380 } 1381 1382 // Check the index. 
1383 const Expr *Arg2 = Call->getArg(2); 1384 if (!Arg2->getType()->isIntegerType() && 1385 !Arg2->getType()->isUnsignedIntegerType()) { 1386 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1387 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1388 << Arg2->getType() << Arg2->getSourceRange(); 1389 return true; 1390 } 1391 1392 // Check packet type T. 1393 if (checkOpenCLPipePacketType(S, Call, 3)) 1394 return true; 1395 } break; 1396 default: 1397 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num) 1398 << Call->getDirectCallee() << Call->getSourceRange(); 1399 return true; 1400 } 1401 1402 return false; 1403 } 1404 1405 // Performs a semantic analysis on the {work_group_/sub_group_ 1406 // /_}reserve_{read/write}_pipe 1407 // \param S Reference to the semantic analyzer. 1408 // \param Call The call to the builtin function to be analyzed. 1409 // \return True if a semantic error was found, false otherwise. 1410 static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) { 1411 if (checkArgCount(S, Call, 2)) 1412 return true; 1413 1414 if (checkOpenCLPipeArg(S, Call)) 1415 return true; 1416 1417 // Check the reserve size. 1418 if (!Call->getArg(1)->getType()->isIntegerType() && 1419 !Call->getArg(1)->getType()->isUnsignedIntegerType()) { 1420 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1421 << Call->getDirectCallee() << S.Context.UnsignedIntTy 1422 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1423 return true; 1424 } 1425 1426 // Since return type of reserve_read/write_pipe built-in function is 1427 // reserve_id_t, which is not defined in the builtin def file , we used int 1428 // as return type and need to override the return type of these functions. 1429 Call->setType(S.Context.OCLReserveIDTy); 1430 1431 return false; 1432 } 1433 1434 // Performs a semantic analysis on {work_group_/sub_group_ 1435 // /_}commit_{read/write}_pipe 1436 // \param S Reference to the semantic analyzer. 1437 // \param Call The call to the builtin function to be analyzed. 1438 // \return True if a semantic error was found, false otherwise. 1439 static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) { 1440 if (checkArgCount(S, Call, 2)) 1441 return true; 1442 1443 if (checkOpenCLPipeArg(S, Call)) 1444 return true; 1445 1446 // Check reserve_id_t. 1447 if (!Call->getArg(1)->getType()->isReserveIDT()) { 1448 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg) 1449 << Call->getDirectCallee() << S.Context.OCLReserveIDTy 1450 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange(); 1451 return true; 1452 } 1453 1454 return false; 1455 } 1456 1457 // Performs a semantic analysis on the call to built-in Pipe 1458 // Query Functions. 1459 // \param S Reference to the semantic analyzer. 1460 // \param Call The call to the builtin function to be analyzed. 1461 // \return True if a semantic error was found, false otherwise. 1462 static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) { 1463 if (checkArgCount(S, Call, 1)) 1464 return true; 1465 1466 if (!Call->getArg(0)->getType()->isPipeType()) { 1467 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg) 1468 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange(); 1469 return true; 1470 } 1471 1472 return false; 1473 } 1474 1475 // OpenCL v2.0 s6.13.9 - Address space qualifier functions. 1476 // Performs semantic analysis for the to_global/local/private call. 1477 // \param S Reference to the semantic analyzer. 
1478 // \param BuiltinID ID of the builtin function. 1479 // \param Call A pointer to the builtin call. 1480 // \return True if a semantic error has been found, false otherwise. 1481 static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID, 1482 CallExpr *Call) { 1483 if (checkArgCount(S, Call, 1)) 1484 return true; 1485 1486 auto RT = Call->getArg(0)->getType(); 1487 if (!RT->isPointerType() || RT->getPointeeType() 1488 .getAddressSpace() == LangAS::opencl_constant) { 1489 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) 1490 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); 1491 return true; 1492 } 1493 1494 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) { 1495 S.Diag(Call->getArg(0)->getBeginLoc(), 1496 diag::warn_opencl_generic_address_space_arg) 1497 << Call->getDirectCallee()->getNameInfo().getAsString() 1498 << Call->getArg(0)->getSourceRange(); 1499 } 1500 1501 RT = RT->getPointeeType(); 1502 auto Qual = RT.getQualifiers(); 1503 switch (BuiltinID) { 1504 case Builtin::BIto_global: 1505 Qual.setAddressSpace(LangAS::opencl_global); 1506 break; 1507 case Builtin::BIto_local: 1508 Qual.setAddressSpace(LangAS::opencl_local); 1509 break; 1510 case Builtin::BIto_private: 1511 Qual.setAddressSpace(LangAS::opencl_private); 1512 break; 1513 default: 1514 llvm_unreachable("Invalid builtin function"); 1515 } 1516 Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( 1517 RT.getUnqualifiedType(), Qual))); 1518 1519 return false; 1520 } 1521 1522 static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) { 1523 if (checkArgCount(S, TheCall, 1)) 1524 return ExprError(); 1525 1526 // Compute __builtin_launder's parameter type from the argument. 1527 // The parameter type is: 1528 // * The type of the argument if it's not an array or function type, 1529 // Otherwise, 1530 // * The decayed argument type. 1531 QualType ParamTy = [&]() { 1532 QualType ArgTy = TheCall->getArg(0)->getType(); 1533 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe()) 1534 return S.Context.getPointerType(Ty->getElementType()); 1535 if (ArgTy->isFunctionType()) { 1536 return S.Context.getPointerType(ArgTy); 1537 } 1538 return ArgTy; 1539 }(); 1540 1541 TheCall->setType(ParamTy); 1542 1543 auto DiagSelect = [&]() -> llvm::Optional<unsigned> { 1544 if (!ParamTy->isPointerType()) 1545 return 0; 1546 if (ParamTy->isFunctionPointerType()) 1547 return 1; 1548 if (ParamTy->isVoidPointerType()) 1549 return 2; 1550 return llvm::Optional<unsigned>{}; 1551 }(); 1552 if (DiagSelect.hasValue()) { 1553 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg) 1554 << DiagSelect.getValue() << TheCall->getSourceRange(); 1555 return ExprError(); 1556 } 1557 1558 // We either have an incomplete class type, or we have a class template 1559 // whose instantiation has not been forced. 
Example: 1560 // 1561 // template <class T> struct Foo { T value; }; 1562 // Foo<int> *p = nullptr; 1563 // auto *d = __builtin_launder(p); 1564 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(), 1565 diag::err_incomplete_type)) 1566 return ExprError(); 1567 1568 assert(ParamTy->getPointeeType()->isObjectType() && 1569 "Unhandled non-object pointer case"); 1570 1571 InitializedEntity Entity = 1572 InitializedEntity::InitializeParameter(S.Context, ParamTy, false); 1573 ExprResult Arg = 1574 S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0)); 1575 if (Arg.isInvalid()) 1576 return ExprError(); 1577 TheCall->setArg(0, Arg.get()); 1578 1579 return TheCall; 1580 } 1581 1582 // Emit an error and return true if the current object format type is in the 1583 // list of unsupported types. 1584 static bool CheckBuiltinTargetNotInUnsupported( 1585 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1586 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1587 llvm::Triple::ObjectFormatType CurObjFormat = 1588 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1589 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1590 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1591 << TheCall->getSourceRange(); 1592 return true; 1593 } 1594 return false; 1595 } 1596 1597 // Emit an error and return true if the current architecture is not in the list 1598 // of supported architectures. 1599 static bool 1600 CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1601 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1602 llvm::Triple::ArchType CurArch = 1603 S.getASTContext().getTargetInfo().getTriple().getArch(); 1604 if (llvm::is_contained(SupportedArchs, CurArch)) 1605 return false; 1606 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1607 << TheCall->getSourceRange(); 1608 return true; 1609 } 1610 1611 static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1612 SourceLocation CallSiteLoc); 1613 1614 bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1615 CallExpr *TheCall) { 1616 switch (TI.getTriple().getArch()) { 1617 default: 1618 // Some builtins don't require additional checking, so just consider these 1619 // acceptable. 
1620 return false; 1621 case llvm::Triple::arm: 1622 case llvm::Triple::armeb: 1623 case llvm::Triple::thumb: 1624 case llvm::Triple::thumbeb: 1625 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1626 case llvm::Triple::aarch64: 1627 case llvm::Triple::aarch64_32: 1628 case llvm::Triple::aarch64_be: 1629 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 1630 case llvm::Triple::bpfeb: 1631 case llvm::Triple::bpfel: 1632 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 1633 case llvm::Triple::hexagon: 1634 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 1635 case llvm::Triple::mips: 1636 case llvm::Triple::mipsel: 1637 case llvm::Triple::mips64: 1638 case llvm::Triple::mips64el: 1639 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 1640 case llvm::Triple::systemz: 1641 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 1642 case llvm::Triple::x86: 1643 case llvm::Triple::x86_64: 1644 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 1645 case llvm::Triple::ppc: 1646 case llvm::Triple::ppcle: 1647 case llvm::Triple::ppc64: 1648 case llvm::Triple::ppc64le: 1649 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 1650 case llvm::Triple::amdgcn: 1651 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 1652 case llvm::Triple::riscv32: 1653 case llvm::Triple::riscv64: 1654 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 1655 } 1656 } 1657 1658 ExprResult 1659 Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, 1660 CallExpr *TheCall) { 1661 ExprResult TheCallResult(TheCall); 1662 1663 // Find out if any arguments are required to be integer constant expressions. 1664 unsigned ICEArguments = 0; 1665 ASTContext::GetBuiltinTypeError Error; 1666 Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); 1667 if (Error != ASTContext::GE_None) 1668 ICEArguments = 0; // Don't diagnose previously diagnosed errors. 1669 1670 // If any arguments are required to be ICE's, check and diagnose. 1671 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1672 // Skip arguments not required to be ICE's. 1673 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1674 1675 llvm::APSInt Result; 1676 if (SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 1677 return true; 1678 ICEArguments &= ~(1 << ArgNo); 1679 } 1680 1681 switch (BuiltinID) { 1682 case Builtin::BI__builtin___CFStringMakeConstantString: 1683 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 1684 // on z/OS) and for XCOFF (i.e., on AIX). 
Emit unsupported 1685 if (CheckBuiltinTargetNotInUnsupported( 1686 *this, BuiltinID, TheCall, 1687 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 1688 return ExprError(); 1689 assert(TheCall->getNumArgs() == 1 && 1690 "Wrong # arguments to builtin CFStringMakeConstantString"); 1691 if (CheckObjCString(TheCall->getArg(0))) 1692 return ExprError(); 1693 break; 1694 case Builtin::BI__builtin_ms_va_start: 1695 case Builtin::BI__builtin_stdarg_start: 1696 case Builtin::BI__builtin_va_start: 1697 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1698 return ExprError(); 1699 break; 1700 case Builtin::BI__va_start: { 1701 switch (Context.getTargetInfo().getTriple().getArch()) { 1702 case llvm::Triple::aarch64: 1703 case llvm::Triple::arm: 1704 case llvm::Triple::thumb: 1705 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 1706 return ExprError(); 1707 break; 1708 default: 1709 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1710 return ExprError(); 1711 break; 1712 } 1713 break; 1714 } 1715 1716 // The acquire, release, and no fence variants are ARM and AArch64 only. 1717 case Builtin::BI_interlockedbittestandset_acq: 1718 case Builtin::BI_interlockedbittestandset_rel: 1719 case Builtin::BI_interlockedbittestandset_nf: 1720 case Builtin::BI_interlockedbittestandreset_acq: 1721 case Builtin::BI_interlockedbittestandreset_rel: 1722 case Builtin::BI_interlockedbittestandreset_nf: 1723 if (CheckBuiltinTargetInSupported( 1724 *this, BuiltinID, TheCall, 1725 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 1726 return ExprError(); 1727 break; 1728 1729 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 1730 case Builtin::BI_bittest64: 1731 case Builtin::BI_bittestandcomplement64: 1732 case Builtin::BI_bittestandreset64: 1733 case Builtin::BI_bittestandset64: 1734 case Builtin::BI_interlockedbittestandreset64: 1735 case Builtin::BI_interlockedbittestandset64: 1736 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 1737 {llvm::Triple::x86_64, llvm::Triple::arm, 1738 llvm::Triple::thumb, 1739 llvm::Triple::aarch64})) 1740 return ExprError(); 1741 break; 1742 1743 case Builtin::BI__builtin_isgreater: 1744 case Builtin::BI__builtin_isgreaterequal: 1745 case Builtin::BI__builtin_isless: 1746 case Builtin::BI__builtin_islessequal: 1747 case Builtin::BI__builtin_islessgreater: 1748 case Builtin::BI__builtin_isunordered: 1749 if (SemaBuiltinUnorderedCompare(TheCall)) 1750 return ExprError(); 1751 break; 1752 case Builtin::BI__builtin_fpclassify: 1753 if (SemaBuiltinFPClassification(TheCall, 6)) 1754 return ExprError(); 1755 break; 1756 case Builtin::BI__builtin_isfinite: 1757 case Builtin::BI__builtin_isinf: 1758 case Builtin::BI__builtin_isinf_sign: 1759 case Builtin::BI__builtin_isnan: 1760 case Builtin::BI__builtin_isnormal: 1761 case Builtin::BI__builtin_signbit: 1762 case Builtin::BI__builtin_signbitf: 1763 case Builtin::BI__builtin_signbitl: 1764 if (SemaBuiltinFPClassification(TheCall, 1)) 1765 return ExprError(); 1766 break; 1767 case Builtin::BI__builtin_shufflevector: 1768 return SemaBuiltinShuffleVector(TheCall); 1769 // TheCall will be freed by the smart pointer here, but that's fine, since 1770 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 
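  // For illustration only (not part of the check itself): a call that reaches
  // SemaBuiltinShuffleVector typically looks like
  //   typedef int v4si __attribute__((vector_size(16)));
  //   v4si r = __builtin_shufflevector(a, b, 0, 4, 1, 5);
  // where a and b are v4si values and the trailing indices are integer constants.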
1771 case Builtin::BI__builtin_prefetch: 1772 if (SemaBuiltinPrefetch(TheCall)) 1773 return ExprError(); 1774 break; 1775 case Builtin::BI__builtin_alloca_with_align: 1776 case Builtin::BI__builtin_alloca_with_align_uninitialized: 1777 if (SemaBuiltinAllocaWithAlign(TheCall)) 1778 return ExprError(); 1779 LLVM_FALLTHROUGH; 1780 case Builtin::BI__builtin_alloca: 1781 case Builtin::BI__builtin_alloca_uninitialized: 1782 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1783 << TheCall->getDirectCallee(); 1784 break; 1785 case Builtin::BI__arithmetic_fence: 1786 if (SemaBuiltinArithmeticFence(TheCall)) 1787 return ExprError(); 1788 break; 1789 case Builtin::BI__assume: 1790 case Builtin::BI__builtin_assume: 1791 if (SemaBuiltinAssume(TheCall)) 1792 return ExprError(); 1793 break; 1794 case Builtin::BI__builtin_assume_aligned: 1795 if (SemaBuiltinAssumeAligned(TheCall)) 1796 return ExprError(); 1797 break; 1798 case Builtin::BI__builtin_dynamic_object_size: 1799 case Builtin::BI__builtin_object_size: 1800 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1801 return ExprError(); 1802 break; 1803 case Builtin::BI__builtin_longjmp: 1804 if (SemaBuiltinLongjmp(TheCall)) 1805 return ExprError(); 1806 break; 1807 case Builtin::BI__builtin_setjmp: 1808 if (SemaBuiltinSetjmp(TheCall)) 1809 return ExprError(); 1810 break; 1811 case Builtin::BI__builtin_classify_type: 1812 if (checkArgCount(*this, TheCall, 1)) return true; 1813 TheCall->setType(Context.IntTy); 1814 break; 1815 case Builtin::BI__builtin_complex: 1816 if (SemaBuiltinComplex(TheCall)) 1817 return ExprError(); 1818 break; 1819 case Builtin::BI__builtin_constant_p: { 1820 if (checkArgCount(*this, TheCall, 1)) return true; 1821 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1822 if (Arg.isInvalid()) return true; 1823 TheCall->setArg(0, Arg.get()); 1824 TheCall->setType(Context.IntTy); 1825 break; 1826 } 1827 case Builtin::BI__builtin_launder: 1828 return SemaBuiltinLaunder(*this, TheCall); 1829 case Builtin::BI__sync_fetch_and_add: 1830 case Builtin::BI__sync_fetch_and_add_1: 1831 case Builtin::BI__sync_fetch_and_add_2: 1832 case Builtin::BI__sync_fetch_and_add_4: 1833 case Builtin::BI__sync_fetch_and_add_8: 1834 case Builtin::BI__sync_fetch_and_add_16: 1835 case Builtin::BI__sync_fetch_and_sub: 1836 case Builtin::BI__sync_fetch_and_sub_1: 1837 case Builtin::BI__sync_fetch_and_sub_2: 1838 case Builtin::BI__sync_fetch_and_sub_4: 1839 case Builtin::BI__sync_fetch_and_sub_8: 1840 case Builtin::BI__sync_fetch_and_sub_16: 1841 case Builtin::BI__sync_fetch_and_or: 1842 case Builtin::BI__sync_fetch_and_or_1: 1843 case Builtin::BI__sync_fetch_and_or_2: 1844 case Builtin::BI__sync_fetch_and_or_4: 1845 case Builtin::BI__sync_fetch_and_or_8: 1846 case Builtin::BI__sync_fetch_and_or_16: 1847 case Builtin::BI__sync_fetch_and_and: 1848 case Builtin::BI__sync_fetch_and_and_1: 1849 case Builtin::BI__sync_fetch_and_and_2: 1850 case Builtin::BI__sync_fetch_and_and_4: 1851 case Builtin::BI__sync_fetch_and_and_8: 1852 case Builtin::BI__sync_fetch_and_and_16: 1853 case Builtin::BI__sync_fetch_and_xor: 1854 case Builtin::BI__sync_fetch_and_xor_1: 1855 case Builtin::BI__sync_fetch_and_xor_2: 1856 case Builtin::BI__sync_fetch_and_xor_4: 1857 case Builtin::BI__sync_fetch_and_xor_8: 1858 case Builtin::BI__sync_fetch_and_xor_16: 1859 case Builtin::BI__sync_fetch_and_nand: 1860 case Builtin::BI__sync_fetch_and_nand_1: 1861 case Builtin::BI__sync_fetch_and_nand_2: 1862 case Builtin::BI__sync_fetch_and_nand_4: 1863 case 
Builtin::BI__sync_fetch_and_nand_8: 1864 case Builtin::BI__sync_fetch_and_nand_16: 1865 case Builtin::BI__sync_add_and_fetch: 1866 case Builtin::BI__sync_add_and_fetch_1: 1867 case Builtin::BI__sync_add_and_fetch_2: 1868 case Builtin::BI__sync_add_and_fetch_4: 1869 case Builtin::BI__sync_add_and_fetch_8: 1870 case Builtin::BI__sync_add_and_fetch_16: 1871 case Builtin::BI__sync_sub_and_fetch: 1872 case Builtin::BI__sync_sub_and_fetch_1: 1873 case Builtin::BI__sync_sub_and_fetch_2: 1874 case Builtin::BI__sync_sub_and_fetch_4: 1875 case Builtin::BI__sync_sub_and_fetch_8: 1876 case Builtin::BI__sync_sub_and_fetch_16: 1877 case Builtin::BI__sync_and_and_fetch: 1878 case Builtin::BI__sync_and_and_fetch_1: 1879 case Builtin::BI__sync_and_and_fetch_2: 1880 case Builtin::BI__sync_and_and_fetch_4: 1881 case Builtin::BI__sync_and_and_fetch_8: 1882 case Builtin::BI__sync_and_and_fetch_16: 1883 case Builtin::BI__sync_or_and_fetch: 1884 case Builtin::BI__sync_or_and_fetch_1: 1885 case Builtin::BI__sync_or_and_fetch_2: 1886 case Builtin::BI__sync_or_and_fetch_4: 1887 case Builtin::BI__sync_or_and_fetch_8: 1888 case Builtin::BI__sync_or_and_fetch_16: 1889 case Builtin::BI__sync_xor_and_fetch: 1890 case Builtin::BI__sync_xor_and_fetch_1: 1891 case Builtin::BI__sync_xor_and_fetch_2: 1892 case Builtin::BI__sync_xor_and_fetch_4: 1893 case Builtin::BI__sync_xor_and_fetch_8: 1894 case Builtin::BI__sync_xor_and_fetch_16: 1895 case Builtin::BI__sync_nand_and_fetch: 1896 case Builtin::BI__sync_nand_and_fetch_1: 1897 case Builtin::BI__sync_nand_and_fetch_2: 1898 case Builtin::BI__sync_nand_and_fetch_4: 1899 case Builtin::BI__sync_nand_and_fetch_8: 1900 case Builtin::BI__sync_nand_and_fetch_16: 1901 case Builtin::BI__sync_val_compare_and_swap: 1902 case Builtin::BI__sync_val_compare_and_swap_1: 1903 case Builtin::BI__sync_val_compare_and_swap_2: 1904 case Builtin::BI__sync_val_compare_and_swap_4: 1905 case Builtin::BI__sync_val_compare_and_swap_8: 1906 case Builtin::BI__sync_val_compare_and_swap_16: 1907 case Builtin::BI__sync_bool_compare_and_swap: 1908 case Builtin::BI__sync_bool_compare_and_swap_1: 1909 case Builtin::BI__sync_bool_compare_and_swap_2: 1910 case Builtin::BI__sync_bool_compare_and_swap_4: 1911 case Builtin::BI__sync_bool_compare_and_swap_8: 1912 case Builtin::BI__sync_bool_compare_and_swap_16: 1913 case Builtin::BI__sync_lock_test_and_set: 1914 case Builtin::BI__sync_lock_test_and_set_1: 1915 case Builtin::BI__sync_lock_test_and_set_2: 1916 case Builtin::BI__sync_lock_test_and_set_4: 1917 case Builtin::BI__sync_lock_test_and_set_8: 1918 case Builtin::BI__sync_lock_test_and_set_16: 1919 case Builtin::BI__sync_lock_release: 1920 case Builtin::BI__sync_lock_release_1: 1921 case Builtin::BI__sync_lock_release_2: 1922 case Builtin::BI__sync_lock_release_4: 1923 case Builtin::BI__sync_lock_release_8: 1924 case Builtin::BI__sync_lock_release_16: 1925 case Builtin::BI__sync_swap: 1926 case Builtin::BI__sync_swap_1: 1927 case Builtin::BI__sync_swap_2: 1928 case Builtin::BI__sync_swap_4: 1929 case Builtin::BI__sync_swap_8: 1930 case Builtin::BI__sync_swap_16: 1931 return SemaBuiltinAtomicOverloaded(TheCallResult); 1932 case Builtin::BI__sync_synchronize: 1933 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1934 << TheCall->getCallee()->getSourceRange(); 1935 break; 1936 case Builtin::BI__builtin_nontemporal_load: 1937 case Builtin::BI__builtin_nontemporal_store: 1938 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1939 case Builtin::BI__builtin_memcpy_inline: { 1940 clang::Expr 
*SizeOp = TheCall->getArg(2); 1941 // We warn about copying to or from `nullptr` pointers when `size` is 1942 // greater than 0. When `size` is value dependent we cannot evaluate its 1943 // value so we bail out. 1944 if (SizeOp->isValueDependent()) 1945 break; 1946 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 1947 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1948 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1949 } 1950 break; 1951 } 1952 #define BUILTIN(ID, TYPE, ATTRS) 1953 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1954 case Builtin::BI##ID: \ 1955 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1956 #include "clang/Basic/Builtins.def" 1957 case Builtin::BI__annotation: 1958 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1959 return ExprError(); 1960 break; 1961 case Builtin::BI__builtin_annotation: 1962 if (SemaBuiltinAnnotation(*this, TheCall)) 1963 return ExprError(); 1964 break; 1965 case Builtin::BI__builtin_addressof: 1966 if (SemaBuiltinAddressof(*this, TheCall)) 1967 return ExprError(); 1968 break; 1969 case Builtin::BI__builtin_function_start: 1970 if (SemaBuiltinFunctionStart(*this, TheCall)) 1971 return ExprError(); 1972 break; 1973 case Builtin::BI__builtin_is_aligned: 1974 case Builtin::BI__builtin_align_up: 1975 case Builtin::BI__builtin_align_down: 1976 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1977 return ExprError(); 1978 break; 1979 case Builtin::BI__builtin_add_overflow: 1980 case Builtin::BI__builtin_sub_overflow: 1981 case Builtin::BI__builtin_mul_overflow: 1982 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1983 return ExprError(); 1984 break; 1985 case Builtin::BI__builtin_operator_new: 1986 case Builtin::BI__builtin_operator_delete: { 1987 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1988 ExprResult Res = 1989 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1990 if (Res.isInvalid()) 1991 CorrectDelayedTyposInExpr(TheCallResult.get()); 1992 return Res; 1993 } 1994 case Builtin::BI__builtin_dump_struct: { 1995 // We first want to ensure we are called with 2 arguments 1996 if (checkArgCount(*this, TheCall, 2)) 1997 return ExprError(); 1998 // Ensure that the first argument is of type 'struct XX *' 1999 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 2000 const QualType PtrArgType = PtrArg->getType(); 2001 if (!PtrArgType->isPointerType() || 2002 !PtrArgType->getPointeeType()->isRecordType()) { 2003 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2004 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 2005 << "structure pointer"; 2006 return ExprError(); 2007 } 2008 2009 // Ensure that the second argument is of type 'FunctionType' 2010 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 2011 const QualType FnPtrArgType = FnPtrArg->getType(); 2012 if (!FnPtrArgType->isPointerType()) { 2013 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2014 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2015 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2016 return ExprError(); 2017 } 2018 2019 const auto *FuncType = 2020 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 2021 2022 if (!FuncType) { 2023 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2024 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2025 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2026 return ExprError(); 
2027 } 2028 2029 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 2030 if (!FT->getNumParams()) { 2031 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2032 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2033 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2034 return ExprError(); 2035 } 2036 QualType PT = FT->getParamType(0); 2037 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 2038 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 2039 !PT->getPointeeType().isConstQualified()) { 2040 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2041 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2042 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2043 return ExprError(); 2044 } 2045 } 2046 2047 TheCall->setType(Context.IntTy); 2048 break; 2049 } 2050 case Builtin::BI__builtin_expect_with_probability: { 2051 // We first want to ensure we are called with 3 arguments 2052 if (checkArgCount(*this, TheCall, 3)) 2053 return ExprError(); 2054 // then check probability is constant float in range [0.0, 1.0] 2055 const Expr *ProbArg = TheCall->getArg(2); 2056 SmallVector<PartialDiagnosticAt, 8> Notes; 2057 Expr::EvalResult Eval; 2058 Eval.Diag = &Notes; 2059 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2060 !Eval.Val.isFloat()) { 2061 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2062 << ProbArg->getSourceRange(); 2063 for (const PartialDiagnosticAt &PDiag : Notes) 2064 Diag(PDiag.first, PDiag.second); 2065 return ExprError(); 2066 } 2067 llvm::APFloat Probability = Eval.Val.getFloat(); 2068 bool LoseInfo = false; 2069 Probability.convert(llvm::APFloat::IEEEdouble(), 2070 llvm::RoundingMode::Dynamic, &LoseInfo); 2071 if (!(Probability >= llvm::APFloat(0.0) && 2072 Probability <= llvm::APFloat(1.0))) { 2073 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2074 << ProbArg->getSourceRange(); 2075 return ExprError(); 2076 } 2077 break; 2078 } 2079 case Builtin::BI__builtin_preserve_access_index: 2080 if (SemaBuiltinPreserveAI(*this, TheCall)) 2081 return ExprError(); 2082 break; 2083 case Builtin::BI__builtin_call_with_static_chain: 2084 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2085 return ExprError(); 2086 break; 2087 case Builtin::BI__exception_code: 2088 case Builtin::BI_exception_code: 2089 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2090 diag::err_seh___except_block)) 2091 return ExprError(); 2092 break; 2093 case Builtin::BI__exception_info: 2094 case Builtin::BI_exception_info: 2095 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2096 diag::err_seh___except_filter)) 2097 return ExprError(); 2098 break; 2099 case Builtin::BI__GetExceptionInfo: 2100 if (checkArgCount(*this, TheCall, 1)) 2101 return ExprError(); 2102 2103 if (CheckCXXThrowOperand( 2104 TheCall->getBeginLoc(), 2105 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2106 TheCall)) 2107 return ExprError(); 2108 2109 TheCall->setType(Context.VoidPtrTy); 2110 break; 2111 // OpenCL v2.0, s6.13.16 - Pipe functions 2112 case Builtin::BIread_pipe: 2113 case Builtin::BIwrite_pipe: 2114 // Since those two functions are declared with var args, we need a semantic 2115 // check for the argument. 
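  // For illustration only, a well-formed OpenCL call checked here is
  //   void kernel k(read_only pipe int p, global int *dst) {
  //     int v;
  //     if (read_pipe(p, &v) == 0) *dst = v;
  //   }
  // where the second argument must point to the pipe's packet type.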
2116 if (SemaBuiltinRWPipe(*this, TheCall)) 2117 return ExprError(); 2118 break; 2119 case Builtin::BIreserve_read_pipe: 2120 case Builtin::BIreserve_write_pipe: 2121 case Builtin::BIwork_group_reserve_read_pipe: 2122 case Builtin::BIwork_group_reserve_write_pipe: 2123 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2124 return ExprError(); 2125 break; 2126 case Builtin::BIsub_group_reserve_read_pipe: 2127 case Builtin::BIsub_group_reserve_write_pipe: 2128 if (checkOpenCLSubgroupExt(*this, TheCall) || 2129 SemaBuiltinReserveRWPipe(*this, TheCall)) 2130 return ExprError(); 2131 break; 2132 case Builtin::BIcommit_read_pipe: 2133 case Builtin::BIcommit_write_pipe: 2134 case Builtin::BIwork_group_commit_read_pipe: 2135 case Builtin::BIwork_group_commit_write_pipe: 2136 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2137 return ExprError(); 2138 break; 2139 case Builtin::BIsub_group_commit_read_pipe: 2140 case Builtin::BIsub_group_commit_write_pipe: 2141 if (checkOpenCLSubgroupExt(*this, TheCall) || 2142 SemaBuiltinCommitRWPipe(*this, TheCall)) 2143 return ExprError(); 2144 break; 2145 case Builtin::BIget_pipe_num_packets: 2146 case Builtin::BIget_pipe_max_packets: 2147 if (SemaBuiltinPipePackets(*this, TheCall)) 2148 return ExprError(); 2149 break; 2150 case Builtin::BIto_global: 2151 case Builtin::BIto_local: 2152 case Builtin::BIto_private: 2153 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2154 return ExprError(); 2155 break; 2156 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 2157 case Builtin::BIenqueue_kernel: 2158 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2159 return ExprError(); 2160 break; 2161 case Builtin::BIget_kernel_work_group_size: 2162 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2163 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2164 return ExprError(); 2165 break; 2166 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2167 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2168 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2169 return ExprError(); 2170 break; 2171 case Builtin::BI__builtin_os_log_format: 2172 Cleanup.setExprNeedsCleanups(true); 2173 LLVM_FALLTHROUGH; 2174 case Builtin::BI__builtin_os_log_format_buffer_size: 2175 if (SemaBuiltinOSLogFormat(TheCall)) 2176 return ExprError(); 2177 break; 2178 case Builtin::BI__builtin_frame_address: 2179 case Builtin::BI__builtin_return_address: { 2180 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2181 return ExprError(); 2182 2183 // -Wframe-address warning if non-zero passed to builtin 2184 // return/frame address. 2185 Expr::EvalResult Result; 2186 if (!TheCall->getArg(0)->isValueDependent() && 2187 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2188 Result.Val.getInt() != 0) 2189 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2190 << ((BuiltinID == Builtin::BI__builtin_return_address) 2191 ? "__builtin_return_address" 2192 : "__builtin_frame_address") 2193 << TheCall->getSourceRange(); 2194 break; 2195 } 2196 2197 // __builtin_elementwise_abs restricts the element type to signed integers or 2198 // floating point types only. 
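  // For illustration only:
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   float4 a = __builtin_elementwise_abs(v);    // OK when v is a float4
  //   unsigned n = __builtin_elementwise_abs(1u); // rejected: unsigned element type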
2199 case Builtin::BI__builtin_elementwise_abs: { 2200 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2201 return ExprError(); 2202 2203 QualType ArgTy = TheCall->getArg(0)->getType(); 2204 QualType EltTy = ArgTy; 2205 2206 if (auto *VecTy = EltTy->getAs<VectorType>()) 2207 EltTy = VecTy->getElementType(); 2208 if (EltTy->isUnsignedIntegerType()) { 2209 Diag(TheCall->getArg(0)->getBeginLoc(), 2210 diag::err_builtin_invalid_arg_type) 2211 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2212 return ExprError(); 2213 } 2214 break; 2215 } 2216 2217 // These builtins restrict the element type to floating point 2218 // types only. 2219 case Builtin::BI__builtin_elementwise_ceil: 2220 case Builtin::BI__builtin_elementwise_floor: 2221 case Builtin::BI__builtin_elementwise_roundeven: 2222 case Builtin::BI__builtin_elementwise_trunc: { 2223 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2224 return ExprError(); 2225 2226 QualType ArgTy = TheCall->getArg(0)->getType(); 2227 QualType EltTy = ArgTy; 2228 2229 if (auto *VecTy = EltTy->getAs<VectorType>()) 2230 EltTy = VecTy->getElementType(); 2231 if (!EltTy->isFloatingType()) { 2232 Diag(TheCall->getArg(0)->getBeginLoc(), 2233 diag::err_builtin_invalid_arg_type) 2234 << 1 << /* float ty*/ 5 << ArgTy; 2235 2236 return ExprError(); 2237 } 2238 break; 2239 } 2240 2241 case Builtin::BI__builtin_elementwise_min: 2242 case Builtin::BI__builtin_elementwise_max: 2243 if (SemaBuiltinElementwiseMath(TheCall)) 2244 return ExprError(); 2245 break; 2246 case Builtin::BI__builtin_reduce_max: 2247 case Builtin::BI__builtin_reduce_min: { 2248 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2249 return ExprError(); 2250 2251 const Expr *Arg = TheCall->getArg(0); 2252 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2253 if (!TyA) { 2254 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2255 << 1 << /* vector ty*/ 4 << Arg->getType(); 2256 return ExprError(); 2257 } 2258 2259 TheCall->setType(TyA->getElementType()); 2260 break; 2261 } 2262 2263 // These builtins support vectors of integers only. 
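  // For illustration only:
  //   typedef int int4 __attribute__((ext_vector_type(4)));
  //   int r = __builtin_reduce_and(v); // OK when v is an int4; result is int
  //   __builtin_reduce_and(2.0f);      // rejected: not a vector of integers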
2264 case Builtin::BI__builtin_reduce_xor: 2265 case Builtin::BI__builtin_reduce_or: 2266 case Builtin::BI__builtin_reduce_and: { 2267 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2268 return ExprError(); 2269 2270 const Expr *Arg = TheCall->getArg(0); 2271 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2272 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2273 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2274 << 1 << /* vector of integers */ 6 << Arg->getType(); 2275 return ExprError(); 2276 } 2277 TheCall->setType(TyA->getElementType()); 2278 break; 2279 } 2280 2281 case Builtin::BI__builtin_matrix_transpose: 2282 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2283 2284 case Builtin::BI__builtin_matrix_column_major_load: 2285 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2286 2287 case Builtin::BI__builtin_matrix_column_major_store: 2288 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2289 2290 case Builtin::BI__builtin_get_device_side_mangled_name: { 2291 auto Check = [](CallExpr *TheCall) { 2292 if (TheCall->getNumArgs() != 1) 2293 return false; 2294 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2295 if (!DRE) 2296 return false; 2297 auto *D = DRE->getDecl(); 2298 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2299 return false; 2300 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2301 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2302 }; 2303 if (!Check(TheCall)) { 2304 Diag(TheCall->getBeginLoc(), 2305 diag::err_hip_invalid_args_builtin_mangled_name); 2306 return ExprError(); 2307 } 2308 } 2309 } 2310 2311 // Since the target specific builtins for each arch overlap, only check those 2312 // of the arch we are compiling for. 2313 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2314 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2315 assert(Context.getAuxTargetInfo() && 2316 "Aux Target Builtin, but not an aux target?"); 2317 2318 if (CheckTSBuiltinFunctionCall( 2319 *Context.getAuxTargetInfo(), 2320 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2321 return ExprError(); 2322 } else { 2323 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2324 TheCall)) 2325 return ExprError(); 2326 } 2327 } 2328 2329 return TheCallResult; 2330 } 2331 2332 // Get the valid immediate range for the specified NEON type code. 2333 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2334 NeonTypeFlags Type(t); 2335 int IsQuad = ForceQuad ? true : Type.isQuad(); 2336 switch (Type.getEltType()) { 2337 case NeonTypeFlags::Int8: 2338 case NeonTypeFlags::Poly8: 2339 return shift ? 7 : (8 << IsQuad) - 1; 2340 case NeonTypeFlags::Int16: 2341 case NeonTypeFlags::Poly16: 2342 return shift ? 15 : (4 << IsQuad) - 1; 2343 case NeonTypeFlags::Int32: 2344 return shift ? 31 : (2 << IsQuad) - 1; 2345 case NeonTypeFlags::Int64: 2346 case NeonTypeFlags::Poly64: 2347 return shift ? 63 : (1 << IsQuad) - 1; 2348 case NeonTypeFlags::Poly128: 2349 return shift ? 
127 : (1 << IsQuad) - 1; 2350 case NeonTypeFlags::Float16: 2351 assert(!shift && "cannot shift float types!"); 2352 return (4 << IsQuad) - 1; 2353 case NeonTypeFlags::Float32: 2354 assert(!shift && "cannot shift float types!"); 2355 return (2 << IsQuad) - 1; 2356 case NeonTypeFlags::Float64: 2357 assert(!shift && "cannot shift float types!"); 2358 return (1 << IsQuad) - 1; 2359 case NeonTypeFlags::BFloat16: 2360 assert(!shift && "cannot shift float types!"); 2361 return (4 << IsQuad) - 1; 2362 } 2363 llvm_unreachable("Invalid NeonTypeFlag!"); 2364 } 2365 2366 /// getNeonEltType - Return the QualType corresponding to the elements of 2367 /// the vector type specified by the NeonTypeFlags. This is used to check 2368 /// the pointer arguments for Neon load/store intrinsics. 2369 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2370 bool IsPolyUnsigned, bool IsInt64Long) { 2371 switch (Flags.getEltType()) { 2372 case NeonTypeFlags::Int8: 2373 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2374 case NeonTypeFlags::Int16: 2375 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2376 case NeonTypeFlags::Int32: 2377 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2378 case NeonTypeFlags::Int64: 2379 if (IsInt64Long) 2380 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2381 else 2382 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2383 : Context.LongLongTy; 2384 case NeonTypeFlags::Poly8: 2385 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2386 case NeonTypeFlags::Poly16: 2387 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2388 case NeonTypeFlags::Poly64: 2389 if (IsInt64Long) 2390 return Context.UnsignedLongTy; 2391 else 2392 return Context.UnsignedLongLongTy; 2393 case NeonTypeFlags::Poly128: 2394 break; 2395 case NeonTypeFlags::Float16: 2396 return Context.HalfTy; 2397 case NeonTypeFlags::Float32: 2398 return Context.FloatTy; 2399 case NeonTypeFlags::Float64: 2400 return Context.DoubleTy; 2401 case NeonTypeFlags::BFloat16: 2402 return Context.BFloat16Ty; 2403 } 2404 llvm_unreachable("Invalid NeonTypeFlag!"); 2405 } 2406 2407 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2408 // Range check SVE intrinsics that take immediate values. 2409 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2410 2411 switch (BuiltinID) { 2412 default: 2413 return false; 2414 #define GET_SVE_IMMEDIATE_CHECK 2415 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2416 #undef GET_SVE_IMMEDIATE_CHECK 2417 } 2418 2419 // Perform all the immediate checks for this builtin call. 2420 bool HasError = false; 2421 for (auto &I : ImmChecks) { 2422 int ArgNum, CheckTy, ElementSizeInBits; 2423 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2424 2425 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2426 2427 // Function that checks whether the operand (ArgNum) is an immediate 2428 // that is one of the predefined values. 2429 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2430 int ErrDiag) -> bool { 2431 // We can't check the value of a dependent argument. 2432 Expr *Arg = TheCall->getArg(ArgNum); 2433 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2434 return false; 2435 2436 // Check constant-ness first. 
2437 llvm::APSInt Imm; 2438 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2439 return true; 2440 2441 if (!CheckImm(Imm.getSExtValue())) 2442 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2443 return false; 2444 }; 2445 2446 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2447 case SVETypeFlags::ImmCheck0_31: 2448 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2449 HasError = true; 2450 break; 2451 case SVETypeFlags::ImmCheck0_13: 2452 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2453 HasError = true; 2454 break; 2455 case SVETypeFlags::ImmCheck1_16: 2456 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2457 HasError = true; 2458 break; 2459 case SVETypeFlags::ImmCheck0_7: 2460 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2461 HasError = true; 2462 break; 2463 case SVETypeFlags::ImmCheckExtract: 2464 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2465 (2048 / ElementSizeInBits) - 1)) 2466 HasError = true; 2467 break; 2468 case SVETypeFlags::ImmCheckShiftRight: 2469 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2470 HasError = true; 2471 break; 2472 case SVETypeFlags::ImmCheckShiftRightNarrow: 2473 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2474 ElementSizeInBits / 2)) 2475 HasError = true; 2476 break; 2477 case SVETypeFlags::ImmCheckShiftLeft: 2478 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2479 ElementSizeInBits - 1)) 2480 HasError = true; 2481 break; 2482 case SVETypeFlags::ImmCheckLaneIndex: 2483 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2484 (128 / (1 * ElementSizeInBits)) - 1)) 2485 HasError = true; 2486 break; 2487 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2488 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2489 (128 / (2 * ElementSizeInBits)) - 1)) 2490 HasError = true; 2491 break; 2492 case SVETypeFlags::ImmCheckLaneIndexDot: 2493 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2494 (128 / (4 * ElementSizeInBits)) - 1)) 2495 HasError = true; 2496 break; 2497 case SVETypeFlags::ImmCheckComplexRot90_270: 2498 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2499 diag::err_rotation_argument_to_cadd)) 2500 HasError = true; 2501 break; 2502 case SVETypeFlags::ImmCheckComplexRotAll90: 2503 if (CheckImmediateInSet( 2504 [](int64_t V) { 2505 return V == 0 || V == 90 || V == 180 || V == 270; 2506 }, 2507 diag::err_rotation_argument_to_cmla)) 2508 HasError = true; 2509 break; 2510 case SVETypeFlags::ImmCheck0_1: 2511 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2512 HasError = true; 2513 break; 2514 case SVETypeFlags::ImmCheck0_2: 2515 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2516 HasError = true; 2517 break; 2518 case SVETypeFlags::ImmCheck0_3: 2519 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2520 HasError = true; 2521 break; 2522 } 2523 } 2524 2525 return HasError; 2526 } 2527 2528 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2529 unsigned BuiltinID, CallExpr *TheCall) { 2530 llvm::APSInt Result; 2531 uint64_t mask = 0; 2532 unsigned TV = 0; 2533 int PtrArgNum = -1; 2534 bool HasConstPtr = false; 2535 switch (BuiltinID) { 2536 #define GET_NEON_OVERLOAD_CHECK 2537 #include "clang/Basic/arm_neon.inc" 2538 #include "clang/Basic/arm_fp16.inc" 2539 #undef GET_NEON_OVERLOAD_CHECK 2540 } 2541 2542 // For NEON intrinsics which are overloaded on vector element type, validate 2543 // the immediate which specifies which variant to emit. 
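  // The discriminating immediate is normally supplied by arm_neon.h itself;
  // e.g. vld1q_u32(p) expands (roughly) to __builtin_neon_vld1q_v(p, <code>)
  // where <code> is a NeonTypeFlags value meaning "unsigned 32-bit, quad".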
2544 unsigned ImmArg = TheCall->getNumArgs()-1; 2545 if (mask) { 2546 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2547 return true; 2548 2549 TV = Result.getLimitedValue(64); 2550 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2551 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2552 << TheCall->getArg(ImmArg)->getSourceRange(); 2553 } 2554 2555 if (PtrArgNum >= 0) { 2556 // Check that pointer arguments have the specified type. 2557 Expr *Arg = TheCall->getArg(PtrArgNum); 2558 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2559 Arg = ICE->getSubExpr(); 2560 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2561 QualType RHSTy = RHS.get()->getType(); 2562 2563 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2564 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2565 Arch == llvm::Triple::aarch64_32 || 2566 Arch == llvm::Triple::aarch64_be; 2567 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2568 QualType EltTy = 2569 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2570 if (HasConstPtr) 2571 EltTy = EltTy.withConst(); 2572 QualType LHSTy = Context.getPointerType(EltTy); 2573 AssignConvertType ConvTy; 2574 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2575 if (RHS.isInvalid()) 2576 return true; 2577 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2578 RHS.get(), AA_Assigning)) 2579 return true; 2580 } 2581 2582 // For NEON intrinsics which take an immediate value as part of the 2583 // instruction, range check them here. 2584 unsigned i = 0, l = 0, u = 0; 2585 switch (BuiltinID) { 2586 default: 2587 return false; 2588 #define GET_NEON_IMMEDIATE_CHECK 2589 #include "clang/Basic/arm_neon.inc" 2590 #include "clang/Basic/arm_fp16.inc" 2591 #undef GET_NEON_IMMEDIATE_CHECK 2592 } 2593 2594 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2595 } 2596 2597 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2598 switch (BuiltinID) { 2599 default: 2600 return false; 2601 #include "clang/Basic/arm_mve_builtin_sema.inc" 2602 } 2603 } 2604 2605 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2606 CallExpr *TheCall) { 2607 bool Err = false; 2608 switch (BuiltinID) { 2609 default: 2610 return false; 2611 #include "clang/Basic/arm_cde_builtin_sema.inc" 2612 } 2613 2614 if (Err) 2615 return true; 2616 2617 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2618 } 2619 2620 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2621 const Expr *CoprocArg, bool WantCDE) { 2622 if (isConstantEvaluated()) 2623 return false; 2624 2625 // We can't check the value of a dependent argument. 
2626 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2627 return false; 2628 2629 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2630 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2631 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2632 2633 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2634 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2635 2636 if (IsCDECoproc != WantCDE) 2637 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2638 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2639 2640 return false; 2641 } 2642 2643 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2644 unsigned MaxWidth) { 2645 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2646 BuiltinID == ARM::BI__builtin_arm_ldaex || 2647 BuiltinID == ARM::BI__builtin_arm_strex || 2648 BuiltinID == ARM::BI__builtin_arm_stlex || 2649 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2650 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2651 BuiltinID == AArch64::BI__builtin_arm_strex || 2652 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2653 "unexpected ARM builtin"); 2654 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2655 BuiltinID == ARM::BI__builtin_arm_ldaex || 2656 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2657 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2658 2659 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2660 2661 // Ensure that we have the proper number of arguments. 2662 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2663 return true; 2664 2665 // Inspect the pointer argument of the atomic builtin. This should always be 2666 // a pointer type, whose element is an integral scalar or pointer type. 2667 // Because it is a pointer type, we don't have to worry about any implicit 2668 // casts here. 2669 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 2670 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2671 if (PointerArgRes.isInvalid()) 2672 return true; 2673 PointerArg = PointerArgRes.get(); 2674 2675 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2676 if (!pointerType) { 2677 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2678 << PointerArg->getType() << PointerArg->getSourceRange(); 2679 return true; 2680 } 2681 2682 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2683 // task is to insert the appropriate casts into the AST. First work out just 2684 // what the appropriate type is. 2685 QualType ValType = pointerType->getPointeeType(); 2686 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2687 if (IsLdrex) 2688 AddrType.addConst(); 2689 2690 // Issue a warning if the cast is dodgy. 2691 CastKind CastNeeded = CK_NoOp; 2692 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2693 CastNeeded = CK_BitCast; 2694 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2695 << PointerArg->getType() << Context.getPointerType(AddrType) 2696 << AA_Passing << PointerArg->getSourceRange(); 2697 } 2698 2699 // Finally, do the cast and replace the argument with the corrected version. 2700 AddrType = Context.getPointerType(AddrType); 2701 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2702 if (PointerArgRes.isInvalid()) 2703 return true; 2704 PointerArg = PointerArgRes.get(); 2705 2706 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 2707 2708 // In general, we allow ints, floats and pointers to be loaded and stored. 2709 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2710 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2711 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2712 << PointerArg->getType() << PointerArg->getSourceRange(); 2713 return true; 2714 } 2715 2716 // But ARM doesn't have instructions to deal with 128-bit versions. 2717 if (Context.getTypeSize(ValType) > MaxWidth) { 2718 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2719 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2720 << PointerArg->getType() << PointerArg->getSourceRange(); 2721 return true; 2722 } 2723 2724 switch (ValType.getObjCLifetime()) { 2725 case Qualifiers::OCL_None: 2726 case Qualifiers::OCL_ExplicitNone: 2727 // okay 2728 break; 2729 2730 case Qualifiers::OCL_Weak: 2731 case Qualifiers::OCL_Strong: 2732 case Qualifiers::OCL_Autoreleasing: 2733 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2734 << ValType << PointerArg->getSourceRange(); 2735 return true; 2736 } 2737 2738 if (IsLdrex) { 2739 TheCall->setType(ValType); 2740 return false; 2741 } 2742 2743 // Initialize the argument to be stored. 2744 ExprResult ValArg = TheCall->getArg(0); 2745 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2746 Context, ValType, /*consume*/ false); 2747 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2748 if (ValArg.isInvalid()) 2749 return true; 2750 TheCall->setArg(0, ValArg.get()); 2751 2752 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2753 // but the custom checker bypasses all default analysis. 2754 TheCall->setType(Context.IntTy); 2755 return false; 2756 } 2757 2758 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2759 CallExpr *TheCall) { 2760 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2761 BuiltinID == ARM::BI__builtin_arm_ldaex || 2762 BuiltinID == ARM::BI__builtin_arm_strex || 2763 BuiltinID == ARM::BI__builtin_arm_stlex) { 2764 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2765 } 2766 2767 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2768 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2769 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2770 } 2771 2772 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2773 BuiltinID == ARM::BI__builtin_arm_wsr64) 2774 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2775 2776 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2777 BuiltinID == ARM::BI__builtin_arm_rsrp || 2778 BuiltinID == ARM::BI__builtin_arm_wsr || 2779 BuiltinID == ARM::BI__builtin_arm_wsrp) 2780 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2781 2782 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2783 return true; 2784 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2785 return true; 2786 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2787 return true; 2788 2789 // For intrinsics which take an immediate value as part of the instruction, 2790 // range check them here. 2791 // FIXME: VFP Intrinsics should error if VFP not present. 
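  // For example, __builtin_arm_ssat(x, 8) saturates to a signed 8-bit range and
  // is accepted because its immediate lies in [1, 32], while
  // __builtin_arm_ssat(x, 0) is rejected by the range check below.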
2792 switch (BuiltinID) { 2793 default: return false; 2794 case ARM::BI__builtin_arm_ssat: 2795 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2796 case ARM::BI__builtin_arm_usat: 2797 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2798 case ARM::BI__builtin_arm_ssat16: 2799 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2800 case ARM::BI__builtin_arm_usat16: 2801 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2802 case ARM::BI__builtin_arm_vcvtr_f: 2803 case ARM::BI__builtin_arm_vcvtr_d: 2804 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2805 case ARM::BI__builtin_arm_dmb: 2806 case ARM::BI__builtin_arm_dsb: 2807 case ARM::BI__builtin_arm_isb: 2808 case ARM::BI__builtin_arm_dbg: 2809 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2810 case ARM::BI__builtin_arm_cdp: 2811 case ARM::BI__builtin_arm_cdp2: 2812 case ARM::BI__builtin_arm_mcr: 2813 case ARM::BI__builtin_arm_mcr2: 2814 case ARM::BI__builtin_arm_mrc: 2815 case ARM::BI__builtin_arm_mrc2: 2816 case ARM::BI__builtin_arm_mcrr: 2817 case ARM::BI__builtin_arm_mcrr2: 2818 case ARM::BI__builtin_arm_mrrc: 2819 case ARM::BI__builtin_arm_mrrc2: 2820 case ARM::BI__builtin_arm_ldc: 2821 case ARM::BI__builtin_arm_ldcl: 2822 case ARM::BI__builtin_arm_ldc2: 2823 case ARM::BI__builtin_arm_ldc2l: 2824 case ARM::BI__builtin_arm_stc: 2825 case ARM::BI__builtin_arm_stcl: 2826 case ARM::BI__builtin_arm_stc2: 2827 case ARM::BI__builtin_arm_stc2l: 2828 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2829 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2830 /*WantCDE*/ false); 2831 } 2832 } 2833 2834 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2835 unsigned BuiltinID, 2836 CallExpr *TheCall) { 2837 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2838 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2839 BuiltinID == AArch64::BI__builtin_arm_strex || 2840 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2841 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2842 } 2843 2844 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2845 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2846 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2847 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2848 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2849 } 2850 2851 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2852 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2853 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2854 2855 // Memory Tagging Extensions (MTE) Intrinsics 2856 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2857 BuiltinID == AArch64::BI__builtin_arm_addg || 2858 BuiltinID == AArch64::BI__builtin_arm_gmi || 2859 BuiltinID == AArch64::BI__builtin_arm_ldg || 2860 BuiltinID == AArch64::BI__builtin_arm_stg || 2861 BuiltinID == AArch64::BI__builtin_arm_subp) { 2862 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2863 } 2864 2865 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2866 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2867 BuiltinID == AArch64::BI__builtin_arm_wsr || 2868 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2869 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2870 2871 // Only check the valid encoding range. Any constant in this range would be 2872 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2873 // an exception for incorrect registers. This matches MSVC behavior. 
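  // For example, _ReadStatusReg(<imm>) is accepted for any constant <imm> in
  // [0, 0x7fff]; a non-constant or out-of-range argument is diagnosed by
  // SemaBuiltinConstantArgRange below.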
2874 if (BuiltinID == AArch64::BI_ReadStatusReg || 2875 BuiltinID == AArch64::BI_WriteStatusReg) 2876 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2877 2878 if (BuiltinID == AArch64::BI__getReg) 2879 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2880 2881 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2882 return true; 2883 2884 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2885 return true; 2886 2887 // For intrinsics which take an immediate value as part of the instruction, 2888 // range check them here. 2889 unsigned i = 0, l = 0, u = 0; 2890 switch (BuiltinID) { 2891 default: return false; 2892 case AArch64::BI__builtin_arm_dmb: 2893 case AArch64::BI__builtin_arm_dsb: 2894 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2895 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2896 } 2897 2898 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2899 } 2900 2901 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2902 if (Arg->getType()->getAsPlaceholderType()) 2903 return false; 2904 2905 // The first argument needs to be a record field access. 2906 // If it is an array element access, we delay decision 2907 // to BPF backend to check whether the access is a 2908 // field access or not. 2909 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2910 isa<MemberExpr>(Arg->IgnoreParens()) || 2911 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 2912 } 2913 2914 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2915 QualType VectorTy, QualType EltTy) { 2916 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2917 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2918 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2919 << Call->getSourceRange() << VectorEltTy << EltTy; 2920 return false; 2921 } 2922 return true; 2923 } 2924 2925 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2926 QualType ArgType = Arg->getType(); 2927 if (ArgType->getAsPlaceholderType()) 2928 return false; 2929 2930 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2931 // format: 2932 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2933 // 2. <type> var; 2934 // __builtin_preserve_type_info(var, flag); 2935 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 2936 !isa<UnaryOperator>(Arg->IgnoreParens())) 2937 return false; 2938 2939 // Typedef type. 2940 if (ArgType->getAs<TypedefType>()) 2941 return true; 2942 2943 // Record type or Enum type. 
2944 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2945 if (const auto *RT = Ty->getAs<RecordType>()) { 2946 if (!RT->getDecl()->getDeclName().isEmpty()) 2947 return true; 2948 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2949 if (!ET->getDecl()->getDeclName().isEmpty()) 2950 return true; 2951 } 2952 2953 return false; 2954 } 2955 2956 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2957 QualType ArgType = Arg->getType(); 2958 if (ArgType->getAsPlaceholderType()) 2959 return false; 2960 2961 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2962 // format: 2963 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2964 // flag); 2965 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2966 if (!UO) 2967 return false; 2968 2969 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2970 if (!CE) 2971 return false; 2972 if (CE->getCastKind() != CK_IntegralToPointer && 2973 CE->getCastKind() != CK_NullToPointer) 2974 return false; 2975 2976 // The integer must be from an EnumConstantDecl. 2977 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2978 if (!DR) 2979 return false; 2980 2981 const EnumConstantDecl *Enumerator = 2982 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2983 if (!Enumerator) 2984 return false; 2985 2986 // The type must be EnumType. 2987 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2988 const auto *ET = Ty->getAs<EnumType>(); 2989 if (!ET) 2990 return false; 2991 2992 // The enum value must be supported. 2993 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 2994 } 2995 2996 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2997 CallExpr *TheCall) { 2998 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2999 BuiltinID == BPF::BI__builtin_btf_type_id || 3000 BuiltinID == BPF::BI__builtin_preserve_type_info || 3001 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3002 "unexpected BPF builtin"); 3003 3004 if (checkArgCount(*this, TheCall, 2)) 3005 return true; 3006 3007 // The second argument needs to be a constant int 3008 Expr *Arg = TheCall->getArg(1); 3009 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3010 diag::kind kind; 3011 if (!Value) { 3012 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3013 kind = diag::err_preserve_field_info_not_const; 3014 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3015 kind = diag::err_btf_type_id_not_const; 3016 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3017 kind = diag::err_preserve_type_info_not_const; 3018 else 3019 kind = diag::err_preserve_enum_value_not_const; 3020 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3021 return true; 3022 } 3023 3024 // The first argument 3025 Arg = TheCall->getArg(0); 3026 bool InvalidArg = false; 3027 bool ReturnUnsignedInt = true; 3028 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3029 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3030 InvalidArg = true; 3031 kind = diag::err_preserve_field_info_not_field; 3032 } 3033 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3034 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3035 InvalidArg = true; 3036 kind = diag::err_preserve_type_info_invalid; 3037 } 3038 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3039 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3040 InvalidArg = true; 3041 kind = diag::err_preserve_enum_value_invalid; 3042 } 3043 ReturnUnsignedInt = false; 3044 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) { 3045 ReturnUnsignedInt 
= false; 3046 } 3047 3048 if (InvalidArg) { 3049 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3050 return true; 3051 } 3052 3053 if (ReturnUnsignedInt) 3054 TheCall->setType(Context.UnsignedIntTy); 3055 else 3056 TheCall->setType(Context.UnsignedLongTy); 3057 return false; 3058 } 3059 3060 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3061 struct ArgInfo { 3062 uint8_t OpNum; 3063 bool IsSigned; 3064 uint8_t BitWidth; 3065 uint8_t Align; 3066 }; 3067 struct BuiltinInfo { 3068 unsigned BuiltinID; 3069 ArgInfo Infos[2]; 3070 }; 3071 3072 static BuiltinInfo Infos[] = { 3073 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3074 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3075 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3076 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3077 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3078 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3079 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3080 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3081 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3082 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3083 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3084 3085 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3086 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3087 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3088 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3089 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3090 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3091 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3092 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3093 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3094 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3095 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3096 3097 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3098 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3099 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3100 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3101 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3102 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3103 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3104 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3105 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3106 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3107 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3108 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3109 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3110 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3111 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3112 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3113 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3114 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3115 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3116 { 
Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3117 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3118 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3119 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3120 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3121 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3122 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3123 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3124 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3125 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3126 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3127 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3128 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3129 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3130 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3131 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3132 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3133 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3134 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3135 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3136 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3137 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3138 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3139 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3140 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3141 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3142 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3143 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3144 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3145 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3146 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3147 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 3148 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3149 {{ 1, false, 6, 0 }} }, 3150 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3151 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3152 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3153 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3154 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3155 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3156 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3157 {{ 1, false, 5, 0 }} }, 3158 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3159 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3160 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3161 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3162 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3163 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3164 { 2, false, 5, 0 }} }, 3165 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 
3166 { 2, false, 6, 0 }} }, 3167 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3168 { 3, false, 5, 0 }} }, 3169 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3170 { 3, false, 6, 0 }} }, 3171 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3172 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3173 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3174 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3175 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3176 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3177 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3178 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 3179 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3180 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3181 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3182 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3183 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3184 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3185 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3186 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3187 {{ 2, false, 4, 0 }, 3188 { 3, false, 5, 0 }} }, 3189 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3190 {{ 2, false, 4, 0 }, 3191 { 3, false, 5, 0 }} }, 3192 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3193 {{ 2, false, 4, 0 }, 3194 { 3, false, 5, 0 }} }, 3195 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3196 {{ 2, false, 4, 0 }, 3197 { 3, false, 5, 0 }} }, 3198 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3199 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3200 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3201 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3202 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3203 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3204 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3205 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3206 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3207 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3208 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3209 { 2, false, 5, 0 }} }, 3210 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3211 { 2, false, 6, 0 }} }, 3212 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 3213 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3214 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3215 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3216 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3217 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3218 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3219 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3220 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3221 {{ 1, false, 4, 0 }} }, 3222 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3223 { 
Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3224 {{ 1, false, 4, 0 }} }, 3225 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3226 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3227 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3228 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3229 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3230 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3231 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3232 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3233 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3234 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3235 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3236 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3237 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3238 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3239 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3240 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 3241 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3242 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3243 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3244 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3245 {{ 3, false, 1, 0 }} }, 3246 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3247 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3248 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3249 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3250 {{ 3, false, 1, 0 }} }, 3251 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3252 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3253 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3254 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3255 {{ 3, false, 1, 0 }} }, 3256 }; 3257 3258 // Use a dynamically initialized static to sort the table exactly once on 3259 // first run. 3260 static const bool SortOnce = 3261 (llvm::sort(Infos, 3262 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3263 return LHS.BuiltinID < RHS.BuiltinID; 3264 }), 3265 true); 3266 (void)SortOnce; 3267 3268 const BuiltinInfo *F = llvm::partition_point( 3269 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3270 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3271 return false; 3272 3273 bool Error = false; 3274 3275 for (const ArgInfo &A : F->Infos) { 3276 // Ignore empty ArgInfo elements. 3277 if (A.BitWidth == 0) 3278 continue; 3279 3280 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3281 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3282 if (!A.Align) { 3283 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3284 } else { 3285 unsigned M = 1 << A.Align; 3286 Min *= M; 3287 Max *= M; 3288 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3289 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3290 } 3291 } 3292 return Error; 3293 } 3294 3295 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3296 CallExpr *TheCall) { 3297 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3298 } 3299 3300 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3301 unsigned BuiltinID, CallExpr *TheCall) { 3302 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3303 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3304 } 3305 3306 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3307 CallExpr *TheCall) { 3308 3309 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3310 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3311 if (!TI.hasFeature("dsp")) 3312 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3313 } 3314 3315 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3316 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3317 if (!TI.hasFeature("dspr2")) 3318 return Diag(TheCall->getBeginLoc(), 3319 diag::err_mips_builtin_requires_dspr2); 3320 } 3321 3322 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3323 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3324 if (!TI.hasFeature("msa")) 3325 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3326 } 3327 3328 return false; 3329 } 3330 3331 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3332 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3333 // ordering for DSP is unspecified. MSA is ordered by the data format used 3334 // by the underlying instruction i.e., df/m, df/n and then by size. 3335 // 3336 // FIXME: The size tests here should instead be tablegen'd along with the 3337 // definitions from include/clang/Basic/BuiltinsMips.def. 3338 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3339 // be too. 3340 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3341 unsigned i = 0, l = 0, u = 0, m = 0; 3342 switch (BuiltinID) { 3343 default: return false; 3344 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3345 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3346 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3347 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3348 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3349 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3350 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3351 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3352 // df/m field. 3353 // These intrinsics take an unsigned 3 bit immediate. 
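// Illustrative note (not part of the original documentation): in the cases
// below, 'i' is the index of the immediate operand, [l, u] is its inclusive
// range, and a non-zero 'm' additionally requires the value to be a multiple
// of m. For example, the shift amount of __builtin_msa_slli_b is operand 1
// and must lie in [0, 7], so a hypothetical call such as
// __builtin_msa_slli_b(v, 8) is rejected as out of range.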
3354 case Mips::BI__builtin_msa_bclri_b: 3355 case Mips::BI__builtin_msa_bnegi_b: 3356 case Mips::BI__builtin_msa_bseti_b: 3357 case Mips::BI__builtin_msa_sat_s_b: 3358 case Mips::BI__builtin_msa_sat_u_b: 3359 case Mips::BI__builtin_msa_slli_b: 3360 case Mips::BI__builtin_msa_srai_b: 3361 case Mips::BI__builtin_msa_srari_b: 3362 case Mips::BI__builtin_msa_srli_b: 3363 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3364 case Mips::BI__builtin_msa_binsli_b: 3365 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3366 // These intrinsics take an unsigned 4 bit immediate. 3367 case Mips::BI__builtin_msa_bclri_h: 3368 case Mips::BI__builtin_msa_bnegi_h: 3369 case Mips::BI__builtin_msa_bseti_h: 3370 case Mips::BI__builtin_msa_sat_s_h: 3371 case Mips::BI__builtin_msa_sat_u_h: 3372 case Mips::BI__builtin_msa_slli_h: 3373 case Mips::BI__builtin_msa_srai_h: 3374 case Mips::BI__builtin_msa_srari_h: 3375 case Mips::BI__builtin_msa_srli_h: 3376 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3377 case Mips::BI__builtin_msa_binsli_h: 3378 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3379 // These intrinsics take an unsigned 5 bit immediate. 3380 // The first block of intrinsics actually have an unsigned 5 bit field, 3381 // not a df/n field. 3382 case Mips::BI__builtin_msa_cfcmsa: 3383 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3384 case Mips::BI__builtin_msa_clei_u_b: 3385 case Mips::BI__builtin_msa_clei_u_h: 3386 case Mips::BI__builtin_msa_clei_u_w: 3387 case Mips::BI__builtin_msa_clei_u_d: 3388 case Mips::BI__builtin_msa_clti_u_b: 3389 case Mips::BI__builtin_msa_clti_u_h: 3390 case Mips::BI__builtin_msa_clti_u_w: 3391 case Mips::BI__builtin_msa_clti_u_d: 3392 case Mips::BI__builtin_msa_maxi_u_b: 3393 case Mips::BI__builtin_msa_maxi_u_h: 3394 case Mips::BI__builtin_msa_maxi_u_w: 3395 case Mips::BI__builtin_msa_maxi_u_d: 3396 case Mips::BI__builtin_msa_mini_u_b: 3397 case Mips::BI__builtin_msa_mini_u_h: 3398 case Mips::BI__builtin_msa_mini_u_w: 3399 case Mips::BI__builtin_msa_mini_u_d: 3400 case Mips::BI__builtin_msa_addvi_b: 3401 case Mips::BI__builtin_msa_addvi_h: 3402 case Mips::BI__builtin_msa_addvi_w: 3403 case Mips::BI__builtin_msa_addvi_d: 3404 case Mips::BI__builtin_msa_bclri_w: 3405 case Mips::BI__builtin_msa_bnegi_w: 3406 case Mips::BI__builtin_msa_bseti_w: 3407 case Mips::BI__builtin_msa_sat_s_w: 3408 case Mips::BI__builtin_msa_sat_u_w: 3409 case Mips::BI__builtin_msa_slli_w: 3410 case Mips::BI__builtin_msa_srai_w: 3411 case Mips::BI__builtin_msa_srari_w: 3412 case Mips::BI__builtin_msa_srli_w: 3413 case Mips::BI__builtin_msa_srlri_w: 3414 case Mips::BI__builtin_msa_subvi_b: 3415 case Mips::BI__builtin_msa_subvi_h: 3416 case Mips::BI__builtin_msa_subvi_w: 3417 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3418 case Mips::BI__builtin_msa_binsli_w: 3419 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3420 // These intrinsics take an unsigned 6 bit immediate. 
3421 case Mips::BI__builtin_msa_bclri_d: 3422 case Mips::BI__builtin_msa_bnegi_d: 3423 case Mips::BI__builtin_msa_bseti_d: 3424 case Mips::BI__builtin_msa_sat_s_d: 3425 case Mips::BI__builtin_msa_sat_u_d: 3426 case Mips::BI__builtin_msa_slli_d: 3427 case Mips::BI__builtin_msa_srai_d: 3428 case Mips::BI__builtin_msa_srari_d: 3429 case Mips::BI__builtin_msa_srli_d: 3430 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3431 case Mips::BI__builtin_msa_binsli_d: 3432 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3433 // These intrinsics take a signed 5 bit immediate. 3434 case Mips::BI__builtin_msa_ceqi_b: 3435 case Mips::BI__builtin_msa_ceqi_h: 3436 case Mips::BI__builtin_msa_ceqi_w: 3437 case Mips::BI__builtin_msa_ceqi_d: 3438 case Mips::BI__builtin_msa_clti_s_b: 3439 case Mips::BI__builtin_msa_clti_s_h: 3440 case Mips::BI__builtin_msa_clti_s_w: 3441 case Mips::BI__builtin_msa_clti_s_d: 3442 case Mips::BI__builtin_msa_clei_s_b: 3443 case Mips::BI__builtin_msa_clei_s_h: 3444 case Mips::BI__builtin_msa_clei_s_w: 3445 case Mips::BI__builtin_msa_clei_s_d: 3446 case Mips::BI__builtin_msa_maxi_s_b: 3447 case Mips::BI__builtin_msa_maxi_s_h: 3448 case Mips::BI__builtin_msa_maxi_s_w: 3449 case Mips::BI__builtin_msa_maxi_s_d: 3450 case Mips::BI__builtin_msa_mini_s_b: 3451 case Mips::BI__builtin_msa_mini_s_h: 3452 case Mips::BI__builtin_msa_mini_s_w: 3453 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3454 // These intrinsics take an unsigned 8 bit immediate. 3455 case Mips::BI__builtin_msa_andi_b: 3456 case Mips::BI__builtin_msa_nori_b: 3457 case Mips::BI__builtin_msa_ori_b: 3458 case Mips::BI__builtin_msa_shf_b: 3459 case Mips::BI__builtin_msa_shf_h: 3460 case Mips::BI__builtin_msa_shf_w: 3461 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3462 case Mips::BI__builtin_msa_bseli_b: 3463 case Mips::BI__builtin_msa_bmnzi_b: 3464 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3465 // df/n format 3466 // These intrinsics take an unsigned 4 bit immediate. 3467 case Mips::BI__builtin_msa_copy_s_b: 3468 case Mips::BI__builtin_msa_copy_u_b: 3469 case Mips::BI__builtin_msa_insve_b: 3470 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3471 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3472 // These intrinsics take an unsigned 3 bit immediate. 3473 case Mips::BI__builtin_msa_copy_s_h: 3474 case Mips::BI__builtin_msa_copy_u_h: 3475 case Mips::BI__builtin_msa_insve_h: 3476 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3477 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3478 // These intrinsics take an unsigned 2 bit immediate. 3479 case Mips::BI__builtin_msa_copy_s_w: 3480 case Mips::BI__builtin_msa_copy_u_w: 3481 case Mips::BI__builtin_msa_insve_w: 3482 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3483 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3484 // These intrinsics take an unsigned 1 bit immediate. 3485 case Mips::BI__builtin_msa_copy_s_d: 3486 case Mips::BI__builtin_msa_copy_u_d: 3487 case Mips::BI__builtin_msa_insve_d: 3488 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3489 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3490 // Memory offsets and immediate loads. 3491 // These intrinsics take a signed 10 bit immediate. 
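// Illustrative note: for the vector load/store intrinsics further below, the
// byte offset must also be a multiple of the element size (the 'm' field).
// For example, __builtin_msa_ld_w takes its offset as operand 1, constrained
// to [-2048, 2044] and to a multiple of 4, so a hypothetical
// __builtin_msa_ld_w(ptr, 6) call is diagnosed.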
3492 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3493 case Mips::BI__builtin_msa_ldi_h: 3494 case Mips::BI__builtin_msa_ldi_w: 3495 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3496 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3497 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3498 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3499 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3500 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3501 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3502 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3503 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3504 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3505 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3506 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3507 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3508 } 3509 3510 if (!m) 3511 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3512 3513 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3514 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3515 } 3516 3517 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3518 /// advancing the pointer over the consumed characters. The decoded type is 3519 /// returned. If the decoded type represents a constant integer with a 3520 /// constraint on its value then Mask is set to that value. The type descriptors 3521 /// used in Str are specific to PPC MMA builtins and are documented in the file 3522 /// defining the PPC builtins. 3523 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3524 unsigned &Mask) { 3525 bool RequireICE = false; 3526 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3527 switch (*Str++) { 3528 case 'V': 3529 return Context.getVectorType(Context.UnsignedCharTy, 16, 3530 VectorType::VectorKind::AltiVecVector); 3531 case 'i': { 3532 char *End; 3533 unsigned size = strtoul(Str, &End, 10); 3534 assert(End != Str && "Missing constant parameter constraint"); 3535 Str = End; 3536 Mask = size; 3537 return Context.IntTy; 3538 } 3539 case 'W': { 3540 char *End; 3541 unsigned size = strtoul(Str, &End, 10); 3542 assert(End != Str && "Missing PowerPC MMA type size"); 3543 Str = End; 3544 QualType Type; 3545 switch (size) { 3546 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3547 case size: Type = Context.Id##Ty; break; 3548 #include "clang/Basic/PPCTypes.def" 3549 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3550 } 3551 bool CheckVectorArgs = false; 3552 while (!CheckVectorArgs) { 3553 switch (*Str++) { 3554 case '*': 3555 Type = Context.getPointerType(Type); 3556 break; 3557 case 'C': 3558 Type = Type.withConst(); 3559 break; 3560 default: 3561 CheckVectorArgs = true; 3562 --Str; 3563 break; 3564 } 3565 } 3566 return Type; 3567 } 3568 default: 3569 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3570 } 3571 } 3572 3573 static bool isPPC_64Builtin(unsigned BuiltinID) { 3574 // These builtins only work on PPC 64bit targets. 
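// For illustration: when compiling for a 32-bit PowerPC triple (e.g.
// powerpc-unknown-linux-gnu), a call such as __builtin_divde(a, b) is
// rejected with err_64_bit_builtin_32_bit_tgt by the caller below; the same
// call is accepted when targeting powerpc64.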
3575 switch (BuiltinID) { 3576 case PPC::BI__builtin_divde: 3577 case PPC::BI__builtin_divdeu: 3578 case PPC::BI__builtin_bpermd: 3579 case PPC::BI__builtin_ppc_ldarx: 3580 case PPC::BI__builtin_ppc_stdcx: 3581 case PPC::BI__builtin_ppc_tdw: 3582 case PPC::BI__builtin_ppc_trapd: 3583 case PPC::BI__builtin_ppc_cmpeqb: 3584 case PPC::BI__builtin_ppc_setb: 3585 case PPC::BI__builtin_ppc_mulhd: 3586 case PPC::BI__builtin_ppc_mulhdu: 3587 case PPC::BI__builtin_ppc_maddhd: 3588 case PPC::BI__builtin_ppc_maddhdu: 3589 case PPC::BI__builtin_ppc_maddld: 3590 case PPC::BI__builtin_ppc_load8r: 3591 case PPC::BI__builtin_ppc_store8r: 3592 case PPC::BI__builtin_ppc_insert_exp: 3593 case PPC::BI__builtin_ppc_extract_sig: 3594 case PPC::BI__builtin_ppc_addex: 3595 case PPC::BI__builtin_darn: 3596 case PPC::BI__builtin_darn_raw: 3597 case PPC::BI__builtin_ppc_compare_and_swaplp: 3598 case PPC::BI__builtin_ppc_fetch_and_addlp: 3599 case PPC::BI__builtin_ppc_fetch_and_andlp: 3600 case PPC::BI__builtin_ppc_fetch_and_orlp: 3601 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3602 return true; 3603 } 3604 return false; 3605 } 3606 3607 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3608 StringRef FeatureToCheck, unsigned DiagID, 3609 StringRef DiagArg = "") { 3610 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3611 return false; 3612 3613 if (DiagArg.empty()) 3614 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3615 else 3616 S.Diag(TheCall->getBeginLoc(), DiagID) 3617 << DiagArg << TheCall->getSourceRange(); 3618 3619 return true; 3620 } 3621 3622 /// Returns true if the argument consists of one contiguous run of 1s with any 3623 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3624 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3625 /// since all 1s are not contiguous. 3626 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3627 llvm::APSInt Result; 3628 // We can't check the value of a dependent argument. 3629 Expr *Arg = TheCall->getArg(ArgNum); 3630 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3631 return false; 3632 3633 // Check constant-ness first. 3634 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3635 return true; 3636 3637 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 
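// Worked example for the check below: 0x0000FFF0 is itself a shifted mask
// and is accepted directly; 0xFF0000FF is not, but its complement 0x00FFFF00
// is, so the wrapped run is accepted by the second test; 0x0F0F0000 fails
// both tests and is diagnosed.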
3638 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3639 return false; 3640 3641 return Diag(TheCall->getBeginLoc(), 3642 diag::err_argument_not_contiguous_bit_field) 3643 << ArgNum << Arg->getSourceRange(); 3644 } 3645 3646 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3647 CallExpr *TheCall) { 3648 unsigned i = 0, l = 0, u = 0; 3649 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3650 llvm::APSInt Result; 3651 3652 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3653 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3654 << TheCall->getSourceRange(); 3655 3656 switch (BuiltinID) { 3657 default: return false; 3658 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3659 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3660 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3661 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3662 case PPC::BI__builtin_altivec_dss: 3663 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3664 case PPC::BI__builtin_tbegin: 3665 case PPC::BI__builtin_tend: 3666 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) || 3667 SemaFeatureCheck(*this, TheCall, "htm", 3668 diag::err_ppc_builtin_requires_htm); 3669 case PPC::BI__builtin_tsr: 3670 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3671 SemaFeatureCheck(*this, TheCall, "htm", 3672 diag::err_ppc_builtin_requires_htm); 3673 case PPC::BI__builtin_tabortwc: 3674 case PPC::BI__builtin_tabortdc: 3675 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3676 SemaFeatureCheck(*this, TheCall, "htm", 3677 diag::err_ppc_builtin_requires_htm); 3678 case PPC::BI__builtin_tabortwci: 3679 case PPC::BI__builtin_tabortdci: 3680 return SemaFeatureCheck(*this, TheCall, "htm", 3681 diag::err_ppc_builtin_requires_htm) || 3682 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3683 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 3684 case PPC::BI__builtin_tabort: 3685 case PPC::BI__builtin_tcheck: 3686 case PPC::BI__builtin_treclaim: 3687 case PPC::BI__builtin_trechkpt: 3688 case PPC::BI__builtin_tendall: 3689 case PPC::BI__builtin_tresume: 3690 case PPC::BI__builtin_tsuspend: 3691 case PPC::BI__builtin_get_texasr: 3692 case PPC::BI__builtin_get_texasru: 3693 case PPC::BI__builtin_get_tfhar: 3694 case PPC::BI__builtin_get_tfiar: 3695 case PPC::BI__builtin_set_texasr: 3696 case PPC::BI__builtin_set_texasru: 3697 case PPC::BI__builtin_set_tfhar: 3698 case PPC::BI__builtin_set_tfiar: 3699 case PPC::BI__builtin_ttest: 3700 return SemaFeatureCheck(*this, TheCall, "htm", 3701 diag::err_ppc_builtin_requires_htm); 3702 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 3703 // __builtin_(un)pack_longdouble are available only if long double uses IBM 3704 // extended double representation. 
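// For illustration (assuming the usual PowerPC long-double options): with
// -mabi=ibmlongdouble the calls are accepted, whereas with
// -mabi=ieeelongdouble or -mlong-double-64 'long double' is no longer the
// PPCDoubleDouble format and the check below emits
// err_ppc_builtin_requires_abi "ibmlongdouble".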
3705 case PPC::BI__builtin_unpack_longdouble: 3706 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 3707 return true; 3708 LLVM_FALLTHROUGH; 3709 case PPC::BI__builtin_pack_longdouble: 3710 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 3711 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 3712 << "ibmlongdouble"; 3713 return false; 3714 case PPC::BI__builtin_altivec_dst: 3715 case PPC::BI__builtin_altivec_dstt: 3716 case PPC::BI__builtin_altivec_dstst: 3717 case PPC::BI__builtin_altivec_dststt: 3718 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3719 case PPC::BI__builtin_vsx_xxpermdi: 3720 case PPC::BI__builtin_vsx_xxsldwi: 3721 return SemaBuiltinVSX(TheCall); 3722 case PPC::BI__builtin_divwe: 3723 case PPC::BI__builtin_divweu: 3724 case PPC::BI__builtin_divde: 3725 case PPC::BI__builtin_divdeu: 3726 return SemaFeatureCheck(*this, TheCall, "extdiv", 3727 diag::err_ppc_builtin_only_on_arch, "7"); 3728 case PPC::BI__builtin_bpermd: 3729 return SemaFeatureCheck(*this, TheCall, "bpermd", 3730 diag::err_ppc_builtin_only_on_arch, "7"); 3731 case PPC::BI__builtin_unpack_vector_int128: 3732 return SemaFeatureCheck(*this, TheCall, "vsx", 3733 diag::err_ppc_builtin_only_on_arch, "7") || 3734 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3735 case PPC::BI__builtin_pack_vector_int128: 3736 return SemaFeatureCheck(*this, TheCall, "vsx", 3737 diag::err_ppc_builtin_only_on_arch, "7"); 3738 case PPC::BI__builtin_altivec_vgnb: 3739 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3740 case PPC::BI__builtin_altivec_vec_replace_elt: 3741 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3742 QualType VecTy = TheCall->getArg(0)->getType(); 3743 QualType EltTy = TheCall->getArg(1)->getType(); 3744 unsigned Width = Context.getIntWidth(EltTy); 3745 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) || 3746 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3747 } 3748 case PPC::BI__builtin_vsx_xxeval: 3749 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3750 case PPC::BI__builtin_altivec_vsldbi: 3751 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3752 case PPC::BI__builtin_altivec_vsrdbi: 3753 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3754 case PPC::BI__builtin_vsx_xxpermx: 3755 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3756 case PPC::BI__builtin_ppc_tw: 3757 case PPC::BI__builtin_ppc_tdw: 3758 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 3759 case PPC::BI__builtin_ppc_cmpeqb: 3760 case PPC::BI__builtin_ppc_setb: 3761 case PPC::BI__builtin_ppc_maddhd: 3762 case PPC::BI__builtin_ppc_maddhdu: 3763 case PPC::BI__builtin_ppc_maddld: 3764 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3765 diag::err_ppc_builtin_only_on_arch, "9"); 3766 case PPC::BI__builtin_ppc_cmprb: 3767 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3768 diag::err_ppc_builtin_only_on_arch, "9") || 3769 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 3770 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 3771 // be a constant that represents a contiguous bit field. 
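// Example with hypothetical operand values: __builtin_ppc_rlwnm(x, 4,
// 0x0FFFFFF0) is accepted because the mask is one contiguous run of 1s,
// while a mask of 0x0F0F0000 is rejected by SemaValueIsRunOfOnes above.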
3772 case PPC::BI__builtin_ppc_rlwnm: 3773 return SemaValueIsRunOfOnes(TheCall, 2); 3774 case PPC::BI__builtin_ppc_rlwimi: 3775 case PPC::BI__builtin_ppc_rldimi: 3776 return SemaBuiltinConstantArg(TheCall, 2, Result) || 3777 SemaValueIsRunOfOnes(TheCall, 3); 3778 case PPC::BI__builtin_ppc_extract_exp: 3779 case PPC::BI__builtin_ppc_extract_sig: 3780 case PPC::BI__builtin_ppc_insert_exp: 3781 return SemaFeatureCheck(*this, TheCall, "power9-vector", 3782 diag::err_ppc_builtin_only_on_arch, "9"); 3783 case PPC::BI__builtin_ppc_addex: { 3784 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3785 diag::err_ppc_builtin_only_on_arch, "9") || 3786 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 3787 return true; 3788 // Output warning for reserved values 1 to 3. 3789 int ArgValue = 3790 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 3791 if (ArgValue != 0) 3792 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 3793 << ArgValue; 3794 return false; 3795 } 3796 case PPC::BI__builtin_ppc_mtfsb0: 3797 case PPC::BI__builtin_ppc_mtfsb1: 3798 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3799 case PPC::BI__builtin_ppc_mtfsf: 3800 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 3801 case PPC::BI__builtin_ppc_mtfsfi: 3802 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3803 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3804 case PPC::BI__builtin_ppc_alignx: 3805 return SemaBuiltinConstantArgPower2(TheCall, 0); 3806 case PPC::BI__builtin_ppc_rdlam: 3807 return SemaValueIsRunOfOnes(TheCall, 2); 3808 case PPC::BI__builtin_ppc_icbt: 3809 case PPC::BI__builtin_ppc_sthcx: 3810 case PPC::BI__builtin_ppc_stbcx: 3811 case PPC::BI__builtin_ppc_lharx: 3812 case PPC::BI__builtin_ppc_lbarx: 3813 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3814 diag::err_ppc_builtin_only_on_arch, "8"); 3815 case PPC::BI__builtin_vsx_ldrmb: 3816 case PPC::BI__builtin_vsx_strmb: 3817 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3818 diag::err_ppc_builtin_only_on_arch, "8") || 3819 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3820 case PPC::BI__builtin_altivec_vcntmbb: 3821 case PPC::BI__builtin_altivec_vcntmbh: 3822 case PPC::BI__builtin_altivec_vcntmbw: 3823 case PPC::BI__builtin_altivec_vcntmbd: 3824 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3825 case PPC::BI__builtin_darn: 3826 case PPC::BI__builtin_darn_raw: 3827 case PPC::BI__builtin_darn_32: 3828 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3829 diag::err_ppc_builtin_only_on_arch, "9"); 3830 case PPC::BI__builtin_vsx_xxgenpcvbm: 3831 case PPC::BI__builtin_vsx_xxgenpcvhm: 3832 case PPC::BI__builtin_vsx_xxgenpcvwm: 3833 case PPC::BI__builtin_vsx_xxgenpcvdm: 3834 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3835 case PPC::BI__builtin_ppc_compare_exp_uo: 3836 case PPC::BI__builtin_ppc_compare_exp_lt: 3837 case PPC::BI__builtin_ppc_compare_exp_gt: 3838 case PPC::BI__builtin_ppc_compare_exp_eq: 3839 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3840 diag::err_ppc_builtin_only_on_arch, "9") || 3841 SemaFeatureCheck(*this, TheCall, "vsx", 3842 diag::err_ppc_builtin_requires_vsx); 3843 case PPC::BI__builtin_ppc_test_data_class: { 3844 // Check if the first argument of the __builtin_ppc_test_data_class call is 3845 // valid. The argument must be either a 'float' or a 'double'. 
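// e.g. __builtin_ppc_test_data_class(1.0, 127) and
// __builtin_ppc_test_data_class(1.0f, 0x40) are accepted, while a 'long
// double' or integer first argument, or a second argument outside [0, 127],
// is diagnosed below.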
3846 QualType ArgType = TheCall->getArg(0)->getType(); 3847 if (ArgType != QualType(Context.FloatTy) && 3848 ArgType != QualType(Context.DoubleTy)) 3849 return Diag(TheCall->getBeginLoc(), 3850 diag::err_ppc_invalid_test_data_class_type); 3851 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3852 diag::err_ppc_builtin_only_on_arch, "9") || 3853 SemaFeatureCheck(*this, TheCall, "vsx", 3854 diag::err_ppc_builtin_requires_vsx) || 3855 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 3856 } 3857 case PPC::BI__builtin_ppc_load8r: 3858 case PPC::BI__builtin_ppc_store8r: 3859 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 3860 diag::err_ppc_builtin_only_on_arch, "7"); 3861 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 3862 case PPC::BI__builtin_##Name: \ 3863 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 3864 #include "clang/Basic/BuiltinsPPC.def" 3865 } 3866 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3867 } 3868 3869 // Check if the given type is a non-pointer PPC MMA type. This function is used 3870 // in Sema to prevent invalid uses of restricted PPC MMA types. 3871 bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) { 3872 if (Type->isPointerType() || Type->isArrayType()) 3873 return false; 3874 3875 QualType CoreType = Type.getCanonicalType().getUnqualifiedType(); 3876 #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty 3877 if (false 3878 #include "clang/Basic/PPCTypes.def" 3879 ) { 3880 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); 3881 return true; 3882 } 3883 return false; 3884 } 3885 3886 bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, 3887 CallExpr *TheCall) { 3888 // Position of memory order and scope arguments in the builtin. 3889 unsigned OrderIndex, ScopeIndex; 3890 switch (BuiltinID) { 3891 case AMDGPU::BI__builtin_amdgcn_atomic_inc32: 3892 case AMDGPU::BI__builtin_amdgcn_atomic_inc64: 3893 case AMDGPU::BI__builtin_amdgcn_atomic_dec32: 3894 case AMDGPU::BI__builtin_amdgcn_atomic_dec64: 3895 OrderIndex = 2; 3896 ScopeIndex = 3; 3897 break; 3898 case AMDGPU::BI__builtin_amdgcn_fence: 3899 OrderIndex = 0; 3900 ScopeIndex = 1; 3901 break; 3902 default: 3903 return false; 3904 } 3905 3906 ExprResult Arg = TheCall->getArg(OrderIndex); 3907 auto ArgExpr = Arg.get(); 3908 Expr::EvalResult ArgResult; 3909 3910 if (!ArgExpr->EvaluateAsInt(ArgResult, Context)) 3911 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int) 3912 << ArgExpr->getType(); 3913 auto Ord = ArgResult.Val.getInt().getZExtValue(); 3914 3915 // Check validity of memory ordering as per C11 / C++11's memory model. 3916 // Only fence needs check. Atomic dec/inc allow all memory orders.
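// Illustrative examples (using the compiler-provided __ATOMIC_* macros):
// __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "agent") is accepted, while
// __builtin_amdgcn_fence(__ATOMIC_RELAXED, "agent") is diagnosed below
// because a fence cannot be relaxed or consume; the atomic inc/dec builtins
// accept any valid C ABI ordering.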
3917 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3918 return Diag(ArgExpr->getBeginLoc(), 3919 diag::warn_atomic_op_has_invalid_memory_order) 3920 << ArgExpr->getSourceRange(); 3921 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3922 case llvm::AtomicOrderingCABI::relaxed: 3923 case llvm::AtomicOrderingCABI::consume: 3924 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3925 return Diag(ArgExpr->getBeginLoc(), 3926 diag::warn_atomic_op_has_invalid_memory_order) 3927 << ArgExpr->getSourceRange(); 3928 break; 3929 case llvm::AtomicOrderingCABI::acquire: 3930 case llvm::AtomicOrderingCABI::release: 3931 case llvm::AtomicOrderingCABI::acq_rel: 3932 case llvm::AtomicOrderingCABI::seq_cst: 3933 break; 3934 } 3935 3936 Arg = TheCall->getArg(ScopeIndex); 3937 ArgExpr = Arg.get(); 3938 Expr::EvalResult ArgResult1; 3939 // Check that sync scope is a constant literal 3940 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3941 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3942 << ArgExpr->getType(); 3943 3944 return false; 3945 } 3946 3947 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3948 llvm::APSInt Result; 3949 3950 // We can't check the value of a dependent argument. 3951 Expr *Arg = TheCall->getArg(ArgNum); 3952 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3953 return false; 3954 3955 // Check constant-ness first. 3956 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3957 return true; 3958 3959 int64_t Val = Result.getSExtValue(); 3960 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3961 return false; 3962 3963 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3964 << Arg->getSourceRange(); 3965 } 3966 3967 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3968 unsigned BuiltinID, 3969 CallExpr *TheCall) { 3970 // CodeGenFunction can also detect this, but this gives a better error 3971 // message. 3972 bool FeatureMissing = false; 3973 SmallVector<StringRef> ReqFeatures; 3974 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3975 Features.split(ReqFeatures, ','); 3976 3977 // Check if each required feature is included 3978 for (StringRef F : ReqFeatures) { 3979 SmallVector<StringRef> ReqOpFeatures; 3980 F.split(ReqOpFeatures, '|'); 3981 bool HasFeature = false; 3982 for (StringRef OF : ReqOpFeatures) { 3983 if (TI.hasFeature(OF)) { 3984 HasFeature = true; 3985 continue; 3986 } 3987 } 3988 3989 if (!HasFeature) { 3990 std::string FeatureStrs = ""; 3991 for (StringRef OF : ReqOpFeatures) { 3992 // If the feature is 64bit, alter the string so it will print better in 3993 // the diagnostic. 3994 if (OF == "64bit") 3995 OF = "RV64"; 3996 3997 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 3998 OF.consume_front("experimental-"); 3999 std::string FeatureStr = OF.str(); 4000 FeatureStr[0] = std::toupper(FeatureStr[0]); 4001 // Combine strings. 4002 FeatureStrs += FeatureStrs == "" ? 
"" : ", "; 4003 FeatureStrs += "'"; 4004 FeatureStrs += FeatureStr; 4005 FeatureStrs += "'"; 4006 } 4007 // Error message 4008 FeatureMissing = true; 4009 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 4010 << TheCall->getSourceRange() << StringRef(FeatureStrs); 4011 } 4012 } 4013 4014 if (FeatureMissing) 4015 return true; 4016 4017 switch (BuiltinID) { 4018 case RISCVVector::BI__builtin_rvv_vsetvli: 4019 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 4020 CheckRISCVLMUL(TheCall, 2); 4021 case RISCVVector::BI__builtin_rvv_vsetvlimax: 4022 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 4023 CheckRISCVLMUL(TheCall, 1); 4024 } 4025 4026 return false; 4027 } 4028 4029 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 4030 CallExpr *TheCall) { 4031 if (BuiltinID == SystemZ::BI__builtin_tabort) { 4032 Expr *Arg = TheCall->getArg(0); 4033 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 4034 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 4035 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 4036 << Arg->getSourceRange(); 4037 } 4038 4039 // For intrinsics which take an immediate value as part of the instruction, 4040 // range check them here. 4041 unsigned i = 0, l = 0, u = 0; 4042 switch (BuiltinID) { 4043 default: return false; 4044 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 4045 case SystemZ::BI__builtin_s390_verimb: 4046 case SystemZ::BI__builtin_s390_verimh: 4047 case SystemZ::BI__builtin_s390_verimf: 4048 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 4049 case SystemZ::BI__builtin_s390_vfaeb: 4050 case SystemZ::BI__builtin_s390_vfaeh: 4051 case SystemZ::BI__builtin_s390_vfaef: 4052 case SystemZ::BI__builtin_s390_vfaebs: 4053 case SystemZ::BI__builtin_s390_vfaehs: 4054 case SystemZ::BI__builtin_s390_vfaefs: 4055 case SystemZ::BI__builtin_s390_vfaezb: 4056 case SystemZ::BI__builtin_s390_vfaezh: 4057 case SystemZ::BI__builtin_s390_vfaezf: 4058 case SystemZ::BI__builtin_s390_vfaezbs: 4059 case SystemZ::BI__builtin_s390_vfaezhs: 4060 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 4061 case SystemZ::BI__builtin_s390_vfisb: 4062 case SystemZ::BI__builtin_s390_vfidb: 4063 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 4064 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 4065 case SystemZ::BI__builtin_s390_vftcisb: 4066 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 4067 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 4068 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 4069 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 4070 case SystemZ::BI__builtin_s390_vstrcb: 4071 case SystemZ::BI__builtin_s390_vstrch: 4072 case SystemZ::BI__builtin_s390_vstrcf: 4073 case SystemZ::BI__builtin_s390_vstrczb: 4074 case SystemZ::BI__builtin_s390_vstrczh: 4075 case SystemZ::BI__builtin_s390_vstrczf: 4076 case SystemZ::BI__builtin_s390_vstrcbs: 4077 case SystemZ::BI__builtin_s390_vstrchs: 4078 case SystemZ::BI__builtin_s390_vstrcfs: 4079 case SystemZ::BI__builtin_s390_vstrczbs: 4080 case SystemZ::BI__builtin_s390_vstrczhs: 4081 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 4082 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 4083 case SystemZ::BI__builtin_s390_vfminsb: 4084 case SystemZ::BI__builtin_s390_vfmaxsb: 4085 case SystemZ::BI__builtin_s390_vfmindb: 4086 case 
SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 4087 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 4088 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 4089 case SystemZ::BI__builtin_s390_vclfnhs: 4090 case SystemZ::BI__builtin_s390_vclfnls: 4091 case SystemZ::BI__builtin_s390_vcfn: 4092 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 4093 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 4094 } 4095 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4096 } 4097 4098 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 4099 /// This checks that the target supports __builtin_cpu_supports and 4100 /// that the string argument is constant and valid. 4101 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 4102 CallExpr *TheCall) { 4103 Expr *Arg = TheCall->getArg(0); 4104 4105 // Check if the argument is a string literal. 4106 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4107 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4108 << Arg->getSourceRange(); 4109 4110 // Check the contents of the string. 4111 StringRef Feature = 4112 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4113 if (!TI.validateCpuSupports(Feature)) 4114 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 4115 << Arg->getSourceRange(); 4116 return false; 4117 } 4118 4119 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 4120 /// This checks that the target supports __builtin_cpu_is and 4121 /// that the string argument is constant and valid. 4122 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4123 Expr *Arg = TheCall->getArg(0); 4124 4125 // Check if the argument is a string literal. 4126 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4127 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4128 << Arg->getSourceRange(); 4129 4130 // Check the contents of the string. 4131 StringRef Feature = 4132 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4133 if (!TI.validateCpuIs(Feature)) 4134 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4135 << Arg->getSourceRange(); 4136 return false; 4137 } 4138 4139 // Check if the rounding mode is legal. 4140 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4141 // Indicates if this instruction has rounding control or just SAE. 
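// Sketch of the accepted immediates, assuming the usual _MM_FROUND_*
// encoding (CUR_DIRECTION = 4, NO_EXC = 8): SAE-only builtins accept 4, 8 or
// 12; builtins with rounding control accept 4, 8, or a rounding mode 0-3
// combined with _MM_FROUND_NO_EXC (values 8-11). Anything else is rejected
// at the end of this function.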
4142 bool HasRC = false; 4143 4144 unsigned ArgNum = 0; 4145 switch (BuiltinID) { 4146 default: 4147 return false; 4148 case X86::BI__builtin_ia32_vcvttsd2si32: 4149 case X86::BI__builtin_ia32_vcvttsd2si64: 4150 case X86::BI__builtin_ia32_vcvttsd2usi32: 4151 case X86::BI__builtin_ia32_vcvttsd2usi64: 4152 case X86::BI__builtin_ia32_vcvttss2si32: 4153 case X86::BI__builtin_ia32_vcvttss2si64: 4154 case X86::BI__builtin_ia32_vcvttss2usi32: 4155 case X86::BI__builtin_ia32_vcvttss2usi64: 4156 case X86::BI__builtin_ia32_vcvttsh2si32: 4157 case X86::BI__builtin_ia32_vcvttsh2si64: 4158 case X86::BI__builtin_ia32_vcvttsh2usi32: 4159 case X86::BI__builtin_ia32_vcvttsh2usi64: 4160 ArgNum = 1; 4161 break; 4162 case X86::BI__builtin_ia32_maxpd512: 4163 case X86::BI__builtin_ia32_maxps512: 4164 case X86::BI__builtin_ia32_minpd512: 4165 case X86::BI__builtin_ia32_minps512: 4166 case X86::BI__builtin_ia32_maxph512: 4167 case X86::BI__builtin_ia32_minph512: 4168 ArgNum = 2; 4169 break; 4170 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4171 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4172 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4173 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 4174 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4175 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4176 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4177 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4178 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4179 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4180 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4181 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4182 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4183 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4184 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4185 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4186 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4187 case X86::BI__builtin_ia32_exp2pd_mask: 4188 case X86::BI__builtin_ia32_exp2ps_mask: 4189 case X86::BI__builtin_ia32_getexppd512_mask: 4190 case X86::BI__builtin_ia32_getexpps512_mask: 4191 case X86::BI__builtin_ia32_getexpph512_mask: 4192 case X86::BI__builtin_ia32_rcp28pd_mask: 4193 case X86::BI__builtin_ia32_rcp28ps_mask: 4194 case X86::BI__builtin_ia32_rsqrt28pd_mask: 4195 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4196 case X86::BI__builtin_ia32_vcomisd: 4197 case X86::BI__builtin_ia32_vcomiss: 4198 case X86::BI__builtin_ia32_vcomish: 4199 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4200 ArgNum = 3; 4201 break; 4202 case X86::BI__builtin_ia32_cmppd512_mask: 4203 case X86::BI__builtin_ia32_cmpps512_mask: 4204 case X86::BI__builtin_ia32_cmpsd_mask: 4205 case X86::BI__builtin_ia32_cmpss_mask: 4206 case X86::BI__builtin_ia32_cmpsh_mask: 4207 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4208 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4209 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4210 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4211 case X86::BI__builtin_ia32_getexpss128_round_mask: 4212 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4213 case X86::BI__builtin_ia32_getmantpd512_mask: 4214 case X86::BI__builtin_ia32_getmantps512_mask: 4215 case X86::BI__builtin_ia32_getmantph512_mask: 4216 case X86::BI__builtin_ia32_maxsd_round_mask: 4217 case X86::BI__builtin_ia32_maxss_round_mask: 4218 case X86::BI__builtin_ia32_maxsh_round_mask: 4219 case X86::BI__builtin_ia32_minsd_round_mask: 4220 case X86::BI__builtin_ia32_minss_round_mask: 4221 case X86::BI__builtin_ia32_minsh_round_mask: 4222 case X86::BI__builtin_ia32_rcp28sd_round_mask: 
4223 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4224 case X86::BI__builtin_ia32_reducepd512_mask: 4225 case X86::BI__builtin_ia32_reduceps512_mask: 4226 case X86::BI__builtin_ia32_reduceph512_mask: 4227 case X86::BI__builtin_ia32_rndscalepd_mask: 4228 case X86::BI__builtin_ia32_rndscaleps_mask: 4229 case X86::BI__builtin_ia32_rndscaleph_mask: 4230 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4231 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4232 ArgNum = 4; 4233 break; 4234 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4235 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4236 case X86::BI__builtin_ia32_fixupimmps512_mask: 4237 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4238 case X86::BI__builtin_ia32_fixupimmsd_mask: 4239 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4240 case X86::BI__builtin_ia32_fixupimmss_mask: 4241 case X86::BI__builtin_ia32_fixupimmss_maskz: 4242 case X86::BI__builtin_ia32_getmantsd_round_mask: 4243 case X86::BI__builtin_ia32_getmantss_round_mask: 4244 case X86::BI__builtin_ia32_getmantsh_round_mask: 4245 case X86::BI__builtin_ia32_rangepd512_mask: 4246 case X86::BI__builtin_ia32_rangeps512_mask: 4247 case X86::BI__builtin_ia32_rangesd128_round_mask: 4248 case X86::BI__builtin_ia32_rangess128_round_mask: 4249 case X86::BI__builtin_ia32_reducesd_mask: 4250 case X86::BI__builtin_ia32_reducess_mask: 4251 case X86::BI__builtin_ia32_reducesh_mask: 4252 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4253 case X86::BI__builtin_ia32_rndscaless_round_mask: 4254 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4255 ArgNum = 5; 4256 break; 4257 case X86::BI__builtin_ia32_vcvtsd2si64: 4258 case X86::BI__builtin_ia32_vcvtsd2si32: 4259 case X86::BI__builtin_ia32_vcvtsd2usi32: 4260 case X86::BI__builtin_ia32_vcvtsd2usi64: 4261 case X86::BI__builtin_ia32_vcvtss2si32: 4262 case X86::BI__builtin_ia32_vcvtss2si64: 4263 case X86::BI__builtin_ia32_vcvtss2usi32: 4264 case X86::BI__builtin_ia32_vcvtss2usi64: 4265 case X86::BI__builtin_ia32_vcvtsh2si32: 4266 case X86::BI__builtin_ia32_vcvtsh2si64: 4267 case X86::BI__builtin_ia32_vcvtsh2usi32: 4268 case X86::BI__builtin_ia32_vcvtsh2usi64: 4269 case X86::BI__builtin_ia32_sqrtpd512: 4270 case X86::BI__builtin_ia32_sqrtps512: 4271 case X86::BI__builtin_ia32_sqrtph512: 4272 ArgNum = 1; 4273 HasRC = true; 4274 break; 4275 case X86::BI__builtin_ia32_addph512: 4276 case X86::BI__builtin_ia32_divph512: 4277 case X86::BI__builtin_ia32_mulph512: 4278 case X86::BI__builtin_ia32_subph512: 4279 case X86::BI__builtin_ia32_addpd512: 4280 case X86::BI__builtin_ia32_addps512: 4281 case X86::BI__builtin_ia32_divpd512: 4282 case X86::BI__builtin_ia32_divps512: 4283 case X86::BI__builtin_ia32_mulpd512: 4284 case X86::BI__builtin_ia32_mulps512: 4285 case X86::BI__builtin_ia32_subpd512: 4286 case X86::BI__builtin_ia32_subps512: 4287 case X86::BI__builtin_ia32_cvtsi2sd64: 4288 case X86::BI__builtin_ia32_cvtsi2ss32: 4289 case X86::BI__builtin_ia32_cvtsi2ss64: 4290 case X86::BI__builtin_ia32_cvtusi2sd64: 4291 case X86::BI__builtin_ia32_cvtusi2ss32: 4292 case X86::BI__builtin_ia32_cvtusi2ss64: 4293 case X86::BI__builtin_ia32_vcvtusi2sh: 4294 case X86::BI__builtin_ia32_vcvtusi642sh: 4295 case X86::BI__builtin_ia32_vcvtsi2sh: 4296 case X86::BI__builtin_ia32_vcvtsi642sh: 4297 ArgNum = 2; 4298 HasRC = true; 4299 break; 4300 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4301 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4302 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4303 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4304 case 
X86::BI__builtin_ia32_cvtpd2ps512_mask: 4305 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4306 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4307 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4308 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4309 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4310 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4311 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4312 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4313 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4314 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4315 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4316 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4317 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4318 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4319 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4320 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4321 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4322 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4323 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4324 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4325 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4326 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4327 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4328 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4329 ArgNum = 3; 4330 HasRC = true; 4331 break; 4332 case X86::BI__builtin_ia32_addsh_round_mask: 4333 case X86::BI__builtin_ia32_addss_round_mask: 4334 case X86::BI__builtin_ia32_addsd_round_mask: 4335 case X86::BI__builtin_ia32_divsh_round_mask: 4336 case X86::BI__builtin_ia32_divss_round_mask: 4337 case X86::BI__builtin_ia32_divsd_round_mask: 4338 case X86::BI__builtin_ia32_mulsh_round_mask: 4339 case X86::BI__builtin_ia32_mulss_round_mask: 4340 case X86::BI__builtin_ia32_mulsd_round_mask: 4341 case X86::BI__builtin_ia32_subsh_round_mask: 4342 case X86::BI__builtin_ia32_subss_round_mask: 4343 case X86::BI__builtin_ia32_subsd_round_mask: 4344 case X86::BI__builtin_ia32_scalefph512_mask: 4345 case X86::BI__builtin_ia32_scalefpd512_mask: 4346 case X86::BI__builtin_ia32_scalefps512_mask: 4347 case X86::BI__builtin_ia32_scalefsd_round_mask: 4348 case X86::BI__builtin_ia32_scalefss_round_mask: 4349 case X86::BI__builtin_ia32_scalefsh_round_mask: 4350 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4351 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4352 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4353 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4354 case X86::BI__builtin_ia32_sqrtss_round_mask: 4355 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4356 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4357 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4358 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4359 case X86::BI__builtin_ia32_vfmaddss3_mask: 4360 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4361 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4362 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4363 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4364 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4365 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4366 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4367 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4368 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4369 case X86::BI__builtin_ia32_vfmaddps512_mask: 4370 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4371 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4372 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4373 case X86::BI__builtin_ia32_vfmaddph512_mask: 4374 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4375 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4376 case 
X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  if (Result == 4 /*ROUND_CUR_DIRECTION*/ || Result == 8 /*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12 /*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
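// Added illustrative note (not part of the original comment): for example, a
// user-level call roughly along the lines of
//   __builtin_ia32_gathersiv8df(src, base, index, mask, /*scale*/ 3);
// is diagnosed below with err_x86_builtin_invalid_scale, whereas a scale of
// 1, 2, 4 or 8 is accepted.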
4436 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4437 CallExpr *TheCall) { 4438 unsigned ArgNum = 0; 4439 switch (BuiltinID) { 4440 default: 4441 return false; 4442 case X86::BI__builtin_ia32_gatherpfdpd: 4443 case X86::BI__builtin_ia32_gatherpfdps: 4444 case X86::BI__builtin_ia32_gatherpfqpd: 4445 case X86::BI__builtin_ia32_gatherpfqps: 4446 case X86::BI__builtin_ia32_scatterpfdpd: 4447 case X86::BI__builtin_ia32_scatterpfdps: 4448 case X86::BI__builtin_ia32_scatterpfqpd: 4449 case X86::BI__builtin_ia32_scatterpfqps: 4450 ArgNum = 3; 4451 break; 4452 case X86::BI__builtin_ia32_gatherd_pd: 4453 case X86::BI__builtin_ia32_gatherd_pd256: 4454 case X86::BI__builtin_ia32_gatherq_pd: 4455 case X86::BI__builtin_ia32_gatherq_pd256: 4456 case X86::BI__builtin_ia32_gatherd_ps: 4457 case X86::BI__builtin_ia32_gatherd_ps256: 4458 case X86::BI__builtin_ia32_gatherq_ps: 4459 case X86::BI__builtin_ia32_gatherq_ps256: 4460 case X86::BI__builtin_ia32_gatherd_q: 4461 case X86::BI__builtin_ia32_gatherd_q256: 4462 case X86::BI__builtin_ia32_gatherq_q: 4463 case X86::BI__builtin_ia32_gatherq_q256: 4464 case X86::BI__builtin_ia32_gatherd_d: 4465 case X86::BI__builtin_ia32_gatherd_d256: 4466 case X86::BI__builtin_ia32_gatherq_d: 4467 case X86::BI__builtin_ia32_gatherq_d256: 4468 case X86::BI__builtin_ia32_gather3div2df: 4469 case X86::BI__builtin_ia32_gather3div2di: 4470 case X86::BI__builtin_ia32_gather3div4df: 4471 case X86::BI__builtin_ia32_gather3div4di: 4472 case X86::BI__builtin_ia32_gather3div4sf: 4473 case X86::BI__builtin_ia32_gather3div4si: 4474 case X86::BI__builtin_ia32_gather3div8sf: 4475 case X86::BI__builtin_ia32_gather3div8si: 4476 case X86::BI__builtin_ia32_gather3siv2df: 4477 case X86::BI__builtin_ia32_gather3siv2di: 4478 case X86::BI__builtin_ia32_gather3siv4df: 4479 case X86::BI__builtin_ia32_gather3siv4di: 4480 case X86::BI__builtin_ia32_gather3siv4sf: 4481 case X86::BI__builtin_ia32_gather3siv4si: 4482 case X86::BI__builtin_ia32_gather3siv8sf: 4483 case X86::BI__builtin_ia32_gather3siv8si: 4484 case X86::BI__builtin_ia32_gathersiv8df: 4485 case X86::BI__builtin_ia32_gathersiv16sf: 4486 case X86::BI__builtin_ia32_gatherdiv8df: 4487 case X86::BI__builtin_ia32_gatherdiv16sf: 4488 case X86::BI__builtin_ia32_gathersiv8di: 4489 case X86::BI__builtin_ia32_gathersiv16si: 4490 case X86::BI__builtin_ia32_gatherdiv8di: 4491 case X86::BI__builtin_ia32_gatherdiv16si: 4492 case X86::BI__builtin_ia32_scatterdiv2df: 4493 case X86::BI__builtin_ia32_scatterdiv2di: 4494 case X86::BI__builtin_ia32_scatterdiv4df: 4495 case X86::BI__builtin_ia32_scatterdiv4di: 4496 case X86::BI__builtin_ia32_scatterdiv4sf: 4497 case X86::BI__builtin_ia32_scatterdiv4si: 4498 case X86::BI__builtin_ia32_scatterdiv8sf: 4499 case X86::BI__builtin_ia32_scatterdiv8si: 4500 case X86::BI__builtin_ia32_scattersiv2df: 4501 case X86::BI__builtin_ia32_scattersiv2di: 4502 case X86::BI__builtin_ia32_scattersiv4df: 4503 case X86::BI__builtin_ia32_scattersiv4di: 4504 case X86::BI__builtin_ia32_scattersiv4sf: 4505 case X86::BI__builtin_ia32_scattersiv4si: 4506 case X86::BI__builtin_ia32_scattersiv8sf: 4507 case X86::BI__builtin_ia32_scattersiv8si: 4508 case X86::BI__builtin_ia32_scattersiv8df: 4509 case X86::BI__builtin_ia32_scattersiv16sf: 4510 case X86::BI__builtin_ia32_scatterdiv8df: 4511 case X86::BI__builtin_ia32_scatterdiv16sf: 4512 case X86::BI__builtin_ia32_scattersiv8di: 4513 case X86::BI__builtin_ia32_scattersiv16si: 4514 case X86::BI__builtin_ia32_scatterdiv8di: 4515 case X86::BI__builtin_ia32_scatterdiv16si: 4516 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum number of tile registers is TileRegHigh + 1, so track which
  // registers have already been used with one bit per register in a bitset.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
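  // (Added illustrative note: isX86_32Builtin above currently covers
  // __builtin_ia32_readeflags_u32 and __builtin_ia32_writeeflags_u32, so using
  // either of them while targeting x86-64 is diagnosed with
  // err_32_bit_builtin_64_bit_tgt just below.)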
4617 const llvm::Triple &TT = TI.getTriple(); 4618 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 4619 return Diag(TheCall->getCallee()->getBeginLoc(), 4620 diag::err_32_bit_builtin_64_bit_tgt); 4621 4622 // If the intrinsic has rounding or SAE make sure its valid. 4623 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 4624 return true; 4625 4626 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 4627 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 4628 return true; 4629 4630 // If the intrinsic has a tile arguments, make sure they are valid. 4631 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 4632 return true; 4633 4634 // For intrinsics which take an immediate value as part of the instruction, 4635 // range check them here. 4636 int i = 0, l = 0, u = 0; 4637 switch (BuiltinID) { 4638 default: 4639 return false; 4640 case X86::BI__builtin_ia32_vec_ext_v2si: 4641 case X86::BI__builtin_ia32_vec_ext_v2di: 4642 case X86::BI__builtin_ia32_vextractf128_pd256: 4643 case X86::BI__builtin_ia32_vextractf128_ps256: 4644 case X86::BI__builtin_ia32_vextractf128_si256: 4645 case X86::BI__builtin_ia32_extract128i256: 4646 case X86::BI__builtin_ia32_extractf64x4_mask: 4647 case X86::BI__builtin_ia32_extracti64x4_mask: 4648 case X86::BI__builtin_ia32_extractf32x8_mask: 4649 case X86::BI__builtin_ia32_extracti32x8_mask: 4650 case X86::BI__builtin_ia32_extractf64x2_256_mask: 4651 case X86::BI__builtin_ia32_extracti64x2_256_mask: 4652 case X86::BI__builtin_ia32_extractf32x4_256_mask: 4653 case X86::BI__builtin_ia32_extracti32x4_256_mask: 4654 i = 1; l = 0; u = 1; 4655 break; 4656 case X86::BI__builtin_ia32_vec_set_v2di: 4657 case X86::BI__builtin_ia32_vinsertf128_pd256: 4658 case X86::BI__builtin_ia32_vinsertf128_ps256: 4659 case X86::BI__builtin_ia32_vinsertf128_si256: 4660 case X86::BI__builtin_ia32_insert128i256: 4661 case X86::BI__builtin_ia32_insertf32x8: 4662 case X86::BI__builtin_ia32_inserti32x8: 4663 case X86::BI__builtin_ia32_insertf64x4: 4664 case X86::BI__builtin_ia32_inserti64x4: 4665 case X86::BI__builtin_ia32_insertf64x2_256: 4666 case X86::BI__builtin_ia32_inserti64x2_256: 4667 case X86::BI__builtin_ia32_insertf32x4_256: 4668 case X86::BI__builtin_ia32_inserti32x4_256: 4669 i = 2; l = 0; u = 1; 4670 break; 4671 case X86::BI__builtin_ia32_vpermilpd: 4672 case X86::BI__builtin_ia32_vec_ext_v4hi: 4673 case X86::BI__builtin_ia32_vec_ext_v4si: 4674 case X86::BI__builtin_ia32_vec_ext_v4sf: 4675 case X86::BI__builtin_ia32_vec_ext_v4di: 4676 case X86::BI__builtin_ia32_extractf32x4_mask: 4677 case X86::BI__builtin_ia32_extracti32x4_mask: 4678 case X86::BI__builtin_ia32_extractf64x2_512_mask: 4679 case X86::BI__builtin_ia32_extracti64x2_512_mask: 4680 i = 1; l = 0; u = 3; 4681 break; 4682 case X86::BI_mm_prefetch: 4683 case X86::BI__builtin_ia32_vec_ext_v8hi: 4684 case X86::BI__builtin_ia32_vec_ext_v8si: 4685 i = 1; l = 0; u = 7; 4686 break; 4687 case X86::BI__builtin_ia32_sha1rnds4: 4688 case X86::BI__builtin_ia32_blendpd: 4689 case X86::BI__builtin_ia32_shufpd: 4690 case X86::BI__builtin_ia32_vec_set_v4hi: 4691 case X86::BI__builtin_ia32_vec_set_v4si: 4692 case X86::BI__builtin_ia32_vec_set_v4di: 4693 case X86::BI__builtin_ia32_shuf_f32x4_256: 4694 case X86::BI__builtin_ia32_shuf_f64x2_256: 4695 case X86::BI__builtin_ia32_shuf_i32x4_256: 4696 case X86::BI__builtin_ia32_shuf_i64x2_256: 4697 case X86::BI__builtin_ia32_insertf64x2_512: 4698 case X86::BI__builtin_ia32_inserti64x2_512: 4699 case X86::BI__builtin_ia32_insertf32x4: 4700 
case X86::BI__builtin_ia32_inserti32x4: 4701 i = 2; l = 0; u = 3; 4702 break; 4703 case X86::BI__builtin_ia32_vpermil2pd: 4704 case X86::BI__builtin_ia32_vpermil2pd256: 4705 case X86::BI__builtin_ia32_vpermil2ps: 4706 case X86::BI__builtin_ia32_vpermil2ps256: 4707 i = 3; l = 0; u = 3; 4708 break; 4709 case X86::BI__builtin_ia32_cmpb128_mask: 4710 case X86::BI__builtin_ia32_cmpw128_mask: 4711 case X86::BI__builtin_ia32_cmpd128_mask: 4712 case X86::BI__builtin_ia32_cmpq128_mask: 4713 case X86::BI__builtin_ia32_cmpb256_mask: 4714 case X86::BI__builtin_ia32_cmpw256_mask: 4715 case X86::BI__builtin_ia32_cmpd256_mask: 4716 case X86::BI__builtin_ia32_cmpq256_mask: 4717 case X86::BI__builtin_ia32_cmpb512_mask: 4718 case X86::BI__builtin_ia32_cmpw512_mask: 4719 case X86::BI__builtin_ia32_cmpd512_mask: 4720 case X86::BI__builtin_ia32_cmpq512_mask: 4721 case X86::BI__builtin_ia32_ucmpb128_mask: 4722 case X86::BI__builtin_ia32_ucmpw128_mask: 4723 case X86::BI__builtin_ia32_ucmpd128_mask: 4724 case X86::BI__builtin_ia32_ucmpq128_mask: 4725 case X86::BI__builtin_ia32_ucmpb256_mask: 4726 case X86::BI__builtin_ia32_ucmpw256_mask: 4727 case X86::BI__builtin_ia32_ucmpd256_mask: 4728 case X86::BI__builtin_ia32_ucmpq256_mask: 4729 case X86::BI__builtin_ia32_ucmpb512_mask: 4730 case X86::BI__builtin_ia32_ucmpw512_mask: 4731 case X86::BI__builtin_ia32_ucmpd512_mask: 4732 case X86::BI__builtin_ia32_ucmpq512_mask: 4733 case X86::BI__builtin_ia32_vpcomub: 4734 case X86::BI__builtin_ia32_vpcomuw: 4735 case X86::BI__builtin_ia32_vpcomud: 4736 case X86::BI__builtin_ia32_vpcomuq: 4737 case X86::BI__builtin_ia32_vpcomb: 4738 case X86::BI__builtin_ia32_vpcomw: 4739 case X86::BI__builtin_ia32_vpcomd: 4740 case X86::BI__builtin_ia32_vpcomq: 4741 case X86::BI__builtin_ia32_vec_set_v8hi: 4742 case X86::BI__builtin_ia32_vec_set_v8si: 4743 i = 2; l = 0; u = 7; 4744 break; 4745 case X86::BI__builtin_ia32_vpermilpd256: 4746 case X86::BI__builtin_ia32_roundps: 4747 case X86::BI__builtin_ia32_roundpd: 4748 case X86::BI__builtin_ia32_roundps256: 4749 case X86::BI__builtin_ia32_roundpd256: 4750 case X86::BI__builtin_ia32_getmantpd128_mask: 4751 case X86::BI__builtin_ia32_getmantpd256_mask: 4752 case X86::BI__builtin_ia32_getmantps128_mask: 4753 case X86::BI__builtin_ia32_getmantps256_mask: 4754 case X86::BI__builtin_ia32_getmantpd512_mask: 4755 case X86::BI__builtin_ia32_getmantps512_mask: 4756 case X86::BI__builtin_ia32_getmantph128_mask: 4757 case X86::BI__builtin_ia32_getmantph256_mask: 4758 case X86::BI__builtin_ia32_getmantph512_mask: 4759 case X86::BI__builtin_ia32_vec_ext_v16qi: 4760 case X86::BI__builtin_ia32_vec_ext_v16hi: 4761 i = 1; l = 0; u = 15; 4762 break; 4763 case X86::BI__builtin_ia32_pblendd128: 4764 case X86::BI__builtin_ia32_blendps: 4765 case X86::BI__builtin_ia32_blendpd256: 4766 case X86::BI__builtin_ia32_shufpd256: 4767 case X86::BI__builtin_ia32_roundss: 4768 case X86::BI__builtin_ia32_roundsd: 4769 case X86::BI__builtin_ia32_rangepd128_mask: 4770 case X86::BI__builtin_ia32_rangepd256_mask: 4771 case X86::BI__builtin_ia32_rangepd512_mask: 4772 case X86::BI__builtin_ia32_rangeps128_mask: 4773 case X86::BI__builtin_ia32_rangeps256_mask: 4774 case X86::BI__builtin_ia32_rangeps512_mask: 4775 case X86::BI__builtin_ia32_getmantsd_round_mask: 4776 case X86::BI__builtin_ia32_getmantss_round_mask: 4777 case X86::BI__builtin_ia32_getmantsh_round_mask: 4778 case X86::BI__builtin_ia32_vec_set_v16qi: 4779 case X86::BI__builtin_ia32_vec_set_v16hi: 4780 i = 2; l = 0; u = 15; 4781 break; 4782 case 
X86::BI__builtin_ia32_vec_ext_v32qi: 4783 i = 1; l = 0; u = 31; 4784 break; 4785 case X86::BI__builtin_ia32_cmpps: 4786 case X86::BI__builtin_ia32_cmpss: 4787 case X86::BI__builtin_ia32_cmppd: 4788 case X86::BI__builtin_ia32_cmpsd: 4789 case X86::BI__builtin_ia32_cmpps256: 4790 case X86::BI__builtin_ia32_cmppd256: 4791 case X86::BI__builtin_ia32_cmpps128_mask: 4792 case X86::BI__builtin_ia32_cmppd128_mask: 4793 case X86::BI__builtin_ia32_cmpps256_mask: 4794 case X86::BI__builtin_ia32_cmppd256_mask: 4795 case X86::BI__builtin_ia32_cmpps512_mask: 4796 case X86::BI__builtin_ia32_cmppd512_mask: 4797 case X86::BI__builtin_ia32_cmpsd_mask: 4798 case X86::BI__builtin_ia32_cmpss_mask: 4799 case X86::BI__builtin_ia32_vec_set_v32qi: 4800 i = 2; l = 0; u = 31; 4801 break; 4802 case X86::BI__builtin_ia32_permdf256: 4803 case X86::BI__builtin_ia32_permdi256: 4804 case X86::BI__builtin_ia32_permdf512: 4805 case X86::BI__builtin_ia32_permdi512: 4806 case X86::BI__builtin_ia32_vpermilps: 4807 case X86::BI__builtin_ia32_vpermilps256: 4808 case X86::BI__builtin_ia32_vpermilpd512: 4809 case X86::BI__builtin_ia32_vpermilps512: 4810 case X86::BI__builtin_ia32_pshufd: 4811 case X86::BI__builtin_ia32_pshufd256: 4812 case X86::BI__builtin_ia32_pshufd512: 4813 case X86::BI__builtin_ia32_pshufhw: 4814 case X86::BI__builtin_ia32_pshufhw256: 4815 case X86::BI__builtin_ia32_pshufhw512: 4816 case X86::BI__builtin_ia32_pshuflw: 4817 case X86::BI__builtin_ia32_pshuflw256: 4818 case X86::BI__builtin_ia32_pshuflw512: 4819 case X86::BI__builtin_ia32_vcvtps2ph: 4820 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4821 case X86::BI__builtin_ia32_vcvtps2ph256: 4822 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4823 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4824 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4825 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4826 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4827 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4828 case X86::BI__builtin_ia32_rndscaleps_mask: 4829 case X86::BI__builtin_ia32_rndscalepd_mask: 4830 case X86::BI__builtin_ia32_rndscaleph_mask: 4831 case X86::BI__builtin_ia32_reducepd128_mask: 4832 case X86::BI__builtin_ia32_reducepd256_mask: 4833 case X86::BI__builtin_ia32_reducepd512_mask: 4834 case X86::BI__builtin_ia32_reduceps128_mask: 4835 case X86::BI__builtin_ia32_reduceps256_mask: 4836 case X86::BI__builtin_ia32_reduceps512_mask: 4837 case X86::BI__builtin_ia32_reduceph128_mask: 4838 case X86::BI__builtin_ia32_reduceph256_mask: 4839 case X86::BI__builtin_ia32_reduceph512_mask: 4840 case X86::BI__builtin_ia32_prold512: 4841 case X86::BI__builtin_ia32_prolq512: 4842 case X86::BI__builtin_ia32_prold128: 4843 case X86::BI__builtin_ia32_prold256: 4844 case X86::BI__builtin_ia32_prolq128: 4845 case X86::BI__builtin_ia32_prolq256: 4846 case X86::BI__builtin_ia32_prord512: 4847 case X86::BI__builtin_ia32_prorq512: 4848 case X86::BI__builtin_ia32_prord128: 4849 case X86::BI__builtin_ia32_prord256: 4850 case X86::BI__builtin_ia32_prorq128: 4851 case X86::BI__builtin_ia32_prorq256: 4852 case X86::BI__builtin_ia32_fpclasspd128_mask: 4853 case X86::BI__builtin_ia32_fpclasspd256_mask: 4854 case X86::BI__builtin_ia32_fpclassps128_mask: 4855 case X86::BI__builtin_ia32_fpclassps256_mask: 4856 case X86::BI__builtin_ia32_fpclassps512_mask: 4857 case X86::BI__builtin_ia32_fpclasspd512_mask: 4858 case X86::BI__builtin_ia32_fpclassph128_mask: 4859 case X86::BI__builtin_ia32_fpclassph256_mask: 4860 case X86::BI__builtin_ia32_fpclassph512_mask: 4861 case 
X86::BI__builtin_ia32_fpclasssd_mask: 4862 case X86::BI__builtin_ia32_fpclassss_mask: 4863 case X86::BI__builtin_ia32_fpclasssh_mask: 4864 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4865 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4866 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4867 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4868 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4869 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4870 case X86::BI__builtin_ia32_kshiftliqi: 4871 case X86::BI__builtin_ia32_kshiftlihi: 4872 case X86::BI__builtin_ia32_kshiftlisi: 4873 case X86::BI__builtin_ia32_kshiftlidi: 4874 case X86::BI__builtin_ia32_kshiftriqi: 4875 case X86::BI__builtin_ia32_kshiftrihi: 4876 case X86::BI__builtin_ia32_kshiftrisi: 4877 case X86::BI__builtin_ia32_kshiftridi: 4878 i = 1; l = 0; u = 255; 4879 break; 4880 case X86::BI__builtin_ia32_vperm2f128_pd256: 4881 case X86::BI__builtin_ia32_vperm2f128_ps256: 4882 case X86::BI__builtin_ia32_vperm2f128_si256: 4883 case X86::BI__builtin_ia32_permti256: 4884 case X86::BI__builtin_ia32_pblendw128: 4885 case X86::BI__builtin_ia32_pblendw256: 4886 case X86::BI__builtin_ia32_blendps256: 4887 case X86::BI__builtin_ia32_pblendd256: 4888 case X86::BI__builtin_ia32_palignr128: 4889 case X86::BI__builtin_ia32_palignr256: 4890 case X86::BI__builtin_ia32_palignr512: 4891 case X86::BI__builtin_ia32_alignq512: 4892 case X86::BI__builtin_ia32_alignd512: 4893 case X86::BI__builtin_ia32_alignd128: 4894 case X86::BI__builtin_ia32_alignd256: 4895 case X86::BI__builtin_ia32_alignq128: 4896 case X86::BI__builtin_ia32_alignq256: 4897 case X86::BI__builtin_ia32_vcomisd: 4898 case X86::BI__builtin_ia32_vcomiss: 4899 case X86::BI__builtin_ia32_shuf_f32x4: 4900 case X86::BI__builtin_ia32_shuf_f64x2: 4901 case X86::BI__builtin_ia32_shuf_i32x4: 4902 case X86::BI__builtin_ia32_shuf_i64x2: 4903 case X86::BI__builtin_ia32_shufpd512: 4904 case X86::BI__builtin_ia32_shufps: 4905 case X86::BI__builtin_ia32_shufps256: 4906 case X86::BI__builtin_ia32_shufps512: 4907 case X86::BI__builtin_ia32_dbpsadbw128: 4908 case X86::BI__builtin_ia32_dbpsadbw256: 4909 case X86::BI__builtin_ia32_dbpsadbw512: 4910 case X86::BI__builtin_ia32_vpshldd128: 4911 case X86::BI__builtin_ia32_vpshldd256: 4912 case X86::BI__builtin_ia32_vpshldd512: 4913 case X86::BI__builtin_ia32_vpshldq128: 4914 case X86::BI__builtin_ia32_vpshldq256: 4915 case X86::BI__builtin_ia32_vpshldq512: 4916 case X86::BI__builtin_ia32_vpshldw128: 4917 case X86::BI__builtin_ia32_vpshldw256: 4918 case X86::BI__builtin_ia32_vpshldw512: 4919 case X86::BI__builtin_ia32_vpshrdd128: 4920 case X86::BI__builtin_ia32_vpshrdd256: 4921 case X86::BI__builtin_ia32_vpshrdd512: 4922 case X86::BI__builtin_ia32_vpshrdq128: 4923 case X86::BI__builtin_ia32_vpshrdq256: 4924 case X86::BI__builtin_ia32_vpshrdq512: 4925 case X86::BI__builtin_ia32_vpshrdw128: 4926 case X86::BI__builtin_ia32_vpshrdw256: 4927 case X86::BI__builtin_ia32_vpshrdw512: 4928 i = 2; l = 0; u = 255; 4929 break; 4930 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4931 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4932 case X86::BI__builtin_ia32_fixupimmps512_mask: 4933 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4934 case X86::BI__builtin_ia32_fixupimmsd_mask: 4935 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4936 case X86::BI__builtin_ia32_fixupimmss_mask: 4937 case X86::BI__builtin_ia32_fixupimmss_maskz: 4938 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4939 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4940 case 
X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have
  // out-of-range values. Such code still needs to code generate, but it
  // doesn't necessarily have to make sense. We use a warning that defaults to
  // an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
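  // (Added illustrative note: e.g. an argument whose type was written as
  // 'int *_Nonnull' is assumed here not to evaluate to null, so no
  // null-argument warning is attempted for it.)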
5015 if (auto nullability 5016 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 5017 if (*nullability == NullabilityKind::NonNull) 5018 return false; 5019 } 5020 5021 // As a special case, transparent unions initialized with zero are 5022 // considered null for the purposes of the nonnull attribute. 5023 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5024 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5025 if (const CompoundLiteralExpr *CLE = 5026 dyn_cast<CompoundLiteralExpr>(Expr)) 5027 if (const InitListExpr *ILE = 5028 dyn_cast<InitListExpr>(CLE->getInitializer())) 5029 Expr = ILE->getInit(0); 5030 } 5031 5032 bool Result; 5033 return (!Expr->isValueDependent() && 5034 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5035 !Result); 5036 } 5037 5038 static void CheckNonNullArgument(Sema &S, 5039 const Expr *ArgExpr, 5040 SourceLocation CallSiteLoc) { 5041 if (CheckNonNullExpr(S, ArgExpr)) 5042 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5043 S.PDiag(diag::warn_null_arg) 5044 << ArgExpr->getSourceRange()); 5045 } 5046 5047 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5048 FormatStringInfo FSI; 5049 if ((GetFormatStringType(Format) == FST_NSString) && 5050 getFormatStringInfo(Format, false, &FSI)) { 5051 Idx = FSI.FormatIdx; 5052 return true; 5053 } 5054 return false; 5055 } 5056 5057 /// Diagnose use of %s directive in an NSString which is being passed 5058 /// as formatting string to formatting method. 5059 static void 5060 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 5061 const NamedDecl *FDecl, 5062 Expr **Args, 5063 unsigned NumArgs) { 5064 unsigned Idx = 0; 5065 bool Format = false; 5066 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 5067 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 5068 Idx = 2; 5069 Format = true; 5070 } 5071 else 5072 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5073 if (S.GetFormatNSStringIdx(I, Idx)) { 5074 Format = true; 5075 break; 5076 } 5077 } 5078 if (!Format || NumArgs <= Idx) 5079 return; 5080 const Expr *FormatExpr = Args[Idx]; 5081 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 5082 FormatExpr = CSCE->getSubExpr(); 5083 const StringLiteral *FormatString; 5084 if (const ObjCStringLiteral *OSL = 5085 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 5086 FormatString = OSL->getString(); 5087 else 5088 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 5089 if (!FormatString) 5090 return; 5091 if (S.FormatStringHasSArg(FormatString)) { 5092 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 5093 << "%s" << 1 << 1; 5094 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 5095 << FDecl->getDeclName(); 5096 } 5097 } 5098 5099 /// Determine whether the given type has a non-null nullability annotation. 5100 static bool isNonNullType(ASTContext &ctx, QualType type) { 5101 if (auto nullability = type->getNullability(ctx)) 5102 return *nullability == NullabilityKind::NonNull; 5103 5104 return false; 5105 } 5106 5107 static void CheckNonNullArguments(Sema &S, 5108 const NamedDecl *FDecl, 5109 const FunctionProtoType *Proto, 5110 ArrayRef<const Expr *> Args, 5111 SourceLocation CallSiteLoc) { 5112 assert((FDecl || Proto) && "Need a function declaration or prototype"); 5113 5114 // Already checked by by constant evaluator. 5115 if (S.isConstantEvaluated()) 5116 return; 5117 // Check the attributes attached to the method/function itself. 
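  // (Added illustrative note: a declaration such as
  //   void f(void *a, void *b) __attribute__((nonnull(1)));
  // marks only the first parameter, while a bare __attribute__((nonnull))
  // treats every pointer argument as non-null; both shapes are handled below.)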
5118 llvm::SmallBitVector NonNullArgs; 5119 if (FDecl) { 5120 // Handle the nonnull attribute on the function/method declaration itself. 5121 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 5122 if (!NonNull->args_size()) { 5123 // Easy case: all pointer arguments are nonnull. 5124 for (const auto *Arg : Args) 5125 if (S.isValidPointerAttrType(Arg->getType())) 5126 CheckNonNullArgument(S, Arg, CallSiteLoc); 5127 return; 5128 } 5129 5130 for (const ParamIdx &Idx : NonNull->args()) { 5131 unsigned IdxAST = Idx.getASTIndex(); 5132 if (IdxAST >= Args.size()) 5133 continue; 5134 if (NonNullArgs.empty()) 5135 NonNullArgs.resize(Args.size()); 5136 NonNullArgs.set(IdxAST); 5137 } 5138 } 5139 } 5140 5141 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 5142 // Handle the nonnull attribute on the parameters of the 5143 // function/method. 5144 ArrayRef<ParmVarDecl*> parms; 5145 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 5146 parms = FD->parameters(); 5147 else 5148 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 5149 5150 unsigned ParamIndex = 0; 5151 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 5152 I != E; ++I, ++ParamIndex) { 5153 const ParmVarDecl *PVD = *I; 5154 if (PVD->hasAttr<NonNullAttr>() || 5155 isNonNullType(S.Context, PVD->getType())) { 5156 if (NonNullArgs.empty()) 5157 NonNullArgs.resize(Args.size()); 5158 5159 NonNullArgs.set(ParamIndex); 5160 } 5161 } 5162 } else { 5163 // If we have a non-function, non-method declaration but no 5164 // function prototype, try to dig out the function prototype. 5165 if (!Proto) { 5166 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 5167 QualType type = VD->getType().getNonReferenceType(); 5168 if (auto pointerType = type->getAs<PointerType>()) 5169 type = pointerType->getPointeeType(); 5170 else if (auto blockType = type->getAs<BlockPointerType>()) 5171 type = blockType->getPointeeType(); 5172 // FIXME: data member pointers? 5173 5174 // Dig out the function prototype, if there is one. 5175 Proto = type->getAs<FunctionProtoType>(); 5176 } 5177 } 5178 5179 // Fill in non-null argument information from the nullability 5180 // information on the parameter types (if we have them). 5181 if (Proto) { 5182 unsigned Index = 0; 5183 for (auto paramType : Proto->getParamTypes()) { 5184 if (isNonNullType(S.Context, paramType)) { 5185 if (NonNullArgs.empty()) 5186 NonNullArgs.resize(Args.size()); 5187 5188 NonNullArgs.set(Index); 5189 } 5190 5191 ++Index; 5192 } 5193 } 5194 } 5195 5196 // Check for non-null arguments. 5197 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 5198 ArgIndex != ArgIndexEnd; ++ArgIndex) { 5199 if (NonNullArgs[ArgIndex]) 5200 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 5201 } 5202 } 5203 5204 /// Warn if a pointer or reference argument passed to a function points to an 5205 /// object that is less aligned than the parameter. This can happen when 5206 /// creating a typedef with a lower alignment than the original type and then 5207 /// calling functions defined in terms of the original type. 5208 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 5209 StringRef ParamName, QualType ArgTy, 5210 QualType ParamTy) { 5211 5212 // If a function accepts a pointer or reference type 5213 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 5214 return; 5215 5216 // If the parameter is a pointer type, get the pointee type for the 5217 // argument too. 
If the parameter is a reference type, don't try to get 5218 // the pointee type for the argument. 5219 if (ParamTy->isPointerType()) 5220 ArgTy = ArgTy->getPointeeType(); 5221 5222 // Remove reference or pointer 5223 ParamTy = ParamTy->getPointeeType(); 5224 5225 // Find expected alignment, and the actual alignment of the passed object. 5226 // getTypeAlignInChars requires complete types 5227 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 5228 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 5229 ArgTy->isUndeducedType()) 5230 return; 5231 5232 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5233 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5234 5235 // If the argument is less aligned than the parameter, there is a 5236 // potential alignment issue. 5237 if (ArgAlign < ParamAlign) 5238 Diag(Loc, diag::warn_param_mismatched_alignment) 5239 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5240 << ParamName << (FDecl != nullptr) << FDecl; 5241 } 5242 5243 /// Handles the checks for format strings, non-POD arguments to vararg 5244 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5245 /// attributes. 5246 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5247 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5248 bool IsMemberFunction, SourceLocation Loc, 5249 SourceRange Range, VariadicCallType CallType) { 5250 // FIXME: We should check as much as we can in the template definition. 5251 if (CurContext->isDependentContext()) 5252 return; 5253 5254 // Printf and scanf checking. 5255 llvm::SmallBitVector CheckedVarArgs; 5256 if (FDecl) { 5257 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5258 // Only create vector if there are format attributes. 5259 CheckedVarArgs.resize(Args.size()); 5260 5261 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5262 CheckedVarArgs); 5263 } 5264 } 5265 5266 // Refuse POD arguments that weren't caught by the format string 5267 // checks above. 5268 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5269 if (CallType != VariadicDoesNotApply && 5270 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5271 unsigned NumParams = Proto ? Proto->getNumParams() 5272 : FDecl && isa<FunctionDecl>(FDecl) 5273 ? cast<FunctionDecl>(FDecl)->getNumParams() 5274 : FDecl && isa<ObjCMethodDecl>(FDecl) 5275 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5276 : 0; 5277 5278 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5279 // Args[ArgIdx] can be null in malformed code. 5280 if (const Expr *Arg = Args[ArgIdx]) { 5281 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5282 checkVariadicArgument(Arg, CallType); 5283 } 5284 } 5285 } 5286 5287 if (FDecl || Proto) { 5288 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5289 5290 // Type safety checking. 5291 if (FDecl) { 5292 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5293 CheckArgumentWithTypeTag(I, Args, Loc); 5294 } 5295 } 5296 5297 // Check that passed arguments match the alignment of original arguments. 5298 // Try to get the missing prototype from the declaration. 5299 if (!Proto && FDecl) { 5300 const auto *FT = FDecl->getFunctionType(); 5301 if (isa_and_nonnull<FunctionProtoType>(FT)) 5302 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5303 } 5304 if (Proto) { 5305 // For variadic functions, we may have more args than parameters. 5306 // For some K&R functions, we may have less args than parameters. 
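  // (Added illustrative sketch, assuming the typedef really does lower the
  // alignment on the target:
  //   typedef double UnalignedDouble __attribute__((aligned(1)));
  //   void takesDouble(double *);
  //   UnalignedDouble U; takesDouble(&U);
  // would trigger warn_param_mismatched_alignment in the loop below.)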
5307 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5308 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5309 // Args[ArgIdx] can be null in malformed code. 5310 if (const Expr *Arg = Args[ArgIdx]) { 5311 if (Arg->containsErrors()) 5312 continue; 5313 5314 QualType ParamTy = Proto->getParamType(ArgIdx); 5315 QualType ArgTy = Arg->getType(); 5316 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5317 ArgTy, ParamTy); 5318 } 5319 } 5320 } 5321 5322 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5323 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5324 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5325 if (!Arg->isValueDependent()) { 5326 Expr::EvalResult Align; 5327 if (Arg->EvaluateAsInt(Align, Context)) { 5328 const llvm::APSInt &I = Align.Val.getInt(); 5329 if (!I.isPowerOf2()) 5330 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5331 << Arg->getSourceRange(); 5332 5333 if (I > Sema::MaximumAlignment) 5334 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5335 << Arg->getSourceRange() << Sema::MaximumAlignment; 5336 } 5337 } 5338 } 5339 5340 if (FD) 5341 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5342 } 5343 5344 /// CheckConstructorCall - Check a constructor call for correctness and safety 5345 /// properties not enforced by the C type system. 5346 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5347 ArrayRef<const Expr *> Args, 5348 const FunctionProtoType *Proto, 5349 SourceLocation Loc) { 5350 VariadicCallType CallType = 5351 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5352 5353 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5354 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5355 Context.getPointerType(Ctor->getThisObjectType())); 5356 5357 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5358 Loc, SourceRange(), CallType); 5359 } 5360 5361 /// CheckFunctionCall - Check a direct function call for various correctness 5362 /// and safety properties not strictly enforced by the C type system. 5363 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5364 const FunctionProtoType *Proto) { 5365 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5366 isa<CXXMethodDecl>(FDecl); 5367 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5368 IsMemberOperatorCall; 5369 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5370 TheCall->getCallee()); 5371 Expr** Args = TheCall->getArgs(); 5372 unsigned NumArgs = TheCall->getNumArgs(); 5373 5374 Expr *ImplicitThis = nullptr; 5375 if (IsMemberOperatorCall) { 5376 // If this is a call to a member operator, hide the first argument 5377 // from checkCall. 5378 // FIXME: Our choice of AST representation here is less than ideal. 5379 ImplicitThis = Args[0]; 5380 ++Args; 5381 --NumArgs; 5382 } else if (IsMemberFunction) 5383 ImplicitThis = 5384 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5385 5386 if (ImplicitThis) { 5387 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5388 // used. 
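    // (Added note: e.g. 'obj.method()' passes the object itself while
    // 'ptr->method()' passes a pointer, hence the adjustment below.)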
5389 QualType ThisType = ImplicitThis->getType(); 5390 if (!ThisType->isPointerType()) { 5391 assert(!ThisType->isReferenceType()); 5392 ThisType = Context.getPointerType(ThisType); 5393 } 5394 5395 QualType ThisTypeFromDecl = 5396 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5397 5398 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5399 ThisTypeFromDecl); 5400 } 5401 5402 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5403 IsMemberFunction, TheCall->getRParenLoc(), 5404 TheCall->getCallee()->getSourceRange(), CallType); 5405 5406 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5407 // None of the checks below are needed for functions that don't have 5408 // simple names (e.g., C++ conversion functions). 5409 if (!FnInfo) 5410 return false; 5411 5412 CheckTCBEnforcement(TheCall, FDecl); 5413 5414 CheckAbsoluteValueFunction(TheCall, FDecl); 5415 CheckMaxUnsignedZero(TheCall, FDecl); 5416 5417 if (getLangOpts().ObjC) 5418 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5419 5420 unsigned CMId = FDecl->getMemoryFunctionKind(); 5421 5422 // Handle memory setting and copying functions. 5423 switch (CMId) { 5424 case 0: 5425 return false; 5426 case Builtin::BIstrlcpy: // fallthrough 5427 case Builtin::BIstrlcat: 5428 CheckStrlcpycatArguments(TheCall, FnInfo); 5429 break; 5430 case Builtin::BIstrncat: 5431 CheckStrncatArguments(TheCall, FnInfo); 5432 break; 5433 case Builtin::BIfree: 5434 CheckFreeArguments(TheCall); 5435 break; 5436 default: 5437 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5438 } 5439 5440 return false; 5441 } 5442 5443 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5444 ArrayRef<const Expr *> Args) { 5445 VariadicCallType CallType = 5446 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5447 5448 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5449 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5450 CallType); 5451 5452 return false; 5453 } 5454 5455 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5456 const FunctionProtoType *Proto) { 5457 QualType Ty; 5458 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5459 Ty = V->getType().getNonReferenceType(); 5460 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5461 Ty = F->getType().getNonReferenceType(); 5462 else 5463 return false; 5464 5465 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5466 !Ty->isFunctionProtoType()) 5467 return false; 5468 5469 VariadicCallType CallType; 5470 if (!Proto || !Proto->isVariadic()) { 5471 CallType = VariadicDoesNotApply; 5472 } else if (Ty->isBlockPointerType()) { 5473 CallType = VariadicBlock; 5474 } else { // Ty->isFunctionPointerType() 5475 CallType = VariadicFunction; 5476 } 5477 5478 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5479 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5480 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5481 TheCall->getCallee()->getSourceRange(), CallType); 5482 5483 return false; 5484 } 5485 5486 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5487 /// such as function pointers returned from functions. 
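/// For example (added illustrative note): in a call like 'getHandler()(x, y)'
/// the callee has a function pointer type but no named declaration, so only
/// the prototype-based checks can be applied.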
5488 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5489 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5490 TheCall->getCallee()); 5491 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5492 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5493 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5494 TheCall->getCallee()->getSourceRange(), CallType); 5495 5496 return false; 5497 } 5498 5499 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5500 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5501 return false; 5502 5503 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5504 switch (Op) { 5505 case AtomicExpr::AO__c11_atomic_init: 5506 case AtomicExpr::AO__opencl_atomic_init: 5507 llvm_unreachable("There is no ordering argument for an init"); 5508 5509 case AtomicExpr::AO__c11_atomic_load: 5510 case AtomicExpr::AO__opencl_atomic_load: 5511 case AtomicExpr::AO__hip_atomic_load: 5512 case AtomicExpr::AO__atomic_load_n: 5513 case AtomicExpr::AO__atomic_load: 5514 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5515 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5516 5517 case AtomicExpr::AO__c11_atomic_store: 5518 case AtomicExpr::AO__opencl_atomic_store: 5519 case AtomicExpr::AO__hip_atomic_store: 5520 case AtomicExpr::AO__atomic_store: 5521 case AtomicExpr::AO__atomic_store_n: 5522 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5523 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5524 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5525 5526 default: 5527 return true; 5528 } 5529 } 5530 5531 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5532 AtomicExpr::AtomicOp Op) { 5533 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5534 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5535 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5536 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5537 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5538 Op); 5539 } 5540 5541 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5542 SourceLocation RParenLoc, MultiExprArg Args, 5543 AtomicExpr::AtomicOp Op, 5544 AtomicArgumentOrder ArgOrder) { 5545 // All the non-OpenCL operations take one of the following forms. 5546 // The OpenCL operations take the __c11 forms with one extra argument for 5547 // synchronization scope. 
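  // For example (added note): C __opencl_atomic_load(A *, int, int) mirrors
  // C __c11_atomic_load(A *, int) with a trailing synchronization-scope
  // argument.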
5548 enum { 5549 // C __c11_atomic_init(A *, C) 5550 Init, 5551 5552 // C __c11_atomic_load(A *, int) 5553 Load, 5554 5555 // void __atomic_load(A *, CP, int) 5556 LoadCopy, 5557 5558 // void __atomic_store(A *, CP, int) 5559 Copy, 5560 5561 // C __c11_atomic_add(A *, M, int) 5562 Arithmetic, 5563 5564 // C __atomic_exchange_n(A *, CP, int) 5565 Xchg, 5566 5567 // void __atomic_exchange(A *, C *, CP, int) 5568 GNUXchg, 5569 5570 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5571 C11CmpXchg, 5572 5573 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5574 GNUCmpXchg 5575 } Form = Init; 5576 5577 const unsigned NumForm = GNUCmpXchg + 1; 5578 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5579 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5580 // where: 5581 // C is an appropriate type, 5582 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5583 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5584 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5585 // the int parameters are for orderings. 5586 5587 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5588 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5589 "need to update code for modified forms"); 5590 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5591 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5592 AtomicExpr::AO__atomic_load, 5593 "need to update code for modified C11 atomics"); 5594 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5595 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5596 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 5597 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 5598 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5599 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5600 IsOpenCL; 5601 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5602 Op == AtomicExpr::AO__atomic_store_n || 5603 Op == AtomicExpr::AO__atomic_exchange_n || 5604 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5605 bool IsAddSub = false; 5606 5607 switch (Op) { 5608 case AtomicExpr::AO__c11_atomic_init: 5609 case AtomicExpr::AO__opencl_atomic_init: 5610 Form = Init; 5611 break; 5612 5613 case AtomicExpr::AO__c11_atomic_load: 5614 case AtomicExpr::AO__opencl_atomic_load: 5615 case AtomicExpr::AO__hip_atomic_load: 5616 case AtomicExpr::AO__atomic_load_n: 5617 Form = Load; 5618 break; 5619 5620 case AtomicExpr::AO__atomic_load: 5621 Form = LoadCopy; 5622 break; 5623 5624 case AtomicExpr::AO__c11_atomic_store: 5625 case AtomicExpr::AO__opencl_atomic_store: 5626 case AtomicExpr::AO__hip_atomic_store: 5627 case AtomicExpr::AO__atomic_store: 5628 case AtomicExpr::AO__atomic_store_n: 5629 Form = Copy; 5630 break; 5631 case AtomicExpr::AO__hip_atomic_fetch_add: 5632 case AtomicExpr::AO__hip_atomic_fetch_min: 5633 case AtomicExpr::AO__hip_atomic_fetch_max: 5634 case AtomicExpr::AO__c11_atomic_fetch_add: 5635 case AtomicExpr::AO__c11_atomic_fetch_sub: 5636 case AtomicExpr::AO__opencl_atomic_fetch_add: 5637 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5638 case AtomicExpr::AO__atomic_fetch_add: 5639 case AtomicExpr::AO__atomic_fetch_sub: 5640 case AtomicExpr::AO__atomic_add_fetch: 5641 case AtomicExpr::AO__atomic_sub_fetch: 5642 IsAddSub = true; 5643 Form = Arithmetic; 5644 break; 5645 case AtomicExpr::AO__c11_atomic_fetch_and: 5646 case AtomicExpr::AO__c11_atomic_fetch_or: 5647 case AtomicExpr::AO__c11_atomic_fetch_xor: 5648 case AtomicExpr::AO__hip_atomic_fetch_and: 5649 case 
AtomicExpr::AO__hip_atomic_fetch_or: 5650 case AtomicExpr::AO__hip_atomic_fetch_xor: 5651 case AtomicExpr::AO__c11_atomic_fetch_nand: 5652 case AtomicExpr::AO__opencl_atomic_fetch_and: 5653 case AtomicExpr::AO__opencl_atomic_fetch_or: 5654 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5655 case AtomicExpr::AO__atomic_fetch_and: 5656 case AtomicExpr::AO__atomic_fetch_or: 5657 case AtomicExpr::AO__atomic_fetch_xor: 5658 case AtomicExpr::AO__atomic_fetch_nand: 5659 case AtomicExpr::AO__atomic_and_fetch: 5660 case AtomicExpr::AO__atomic_or_fetch: 5661 case AtomicExpr::AO__atomic_xor_fetch: 5662 case AtomicExpr::AO__atomic_nand_fetch: 5663 Form = Arithmetic; 5664 break; 5665 case AtomicExpr::AO__c11_atomic_fetch_min: 5666 case AtomicExpr::AO__c11_atomic_fetch_max: 5667 case AtomicExpr::AO__opencl_atomic_fetch_min: 5668 case AtomicExpr::AO__opencl_atomic_fetch_max: 5669 case AtomicExpr::AO__atomic_min_fetch: 5670 case AtomicExpr::AO__atomic_max_fetch: 5671 case AtomicExpr::AO__atomic_fetch_min: 5672 case AtomicExpr::AO__atomic_fetch_max: 5673 Form = Arithmetic; 5674 break; 5675 5676 case AtomicExpr::AO__c11_atomic_exchange: 5677 case AtomicExpr::AO__hip_atomic_exchange: 5678 case AtomicExpr::AO__opencl_atomic_exchange: 5679 case AtomicExpr::AO__atomic_exchange_n: 5680 Form = Xchg; 5681 break; 5682 5683 case AtomicExpr::AO__atomic_exchange: 5684 Form = GNUXchg; 5685 break; 5686 5687 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5688 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5689 case AtomicExpr::AO__hip_atomic_compare_exchange_strong: 5690 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5691 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5692 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 5693 Form = C11CmpXchg; 5694 break; 5695 5696 case AtomicExpr::AO__atomic_compare_exchange: 5697 case AtomicExpr::AO__atomic_compare_exchange_n: 5698 Form = GNUCmpXchg; 5699 break; 5700 } 5701 5702 unsigned AdjustedNumArgs = NumArgs[Form]; 5703 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 5704 ++AdjustedNumArgs; 5705 // Check we have the right number of arguments. 5706 if (Args.size() < AdjustedNumArgs) { 5707 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5708 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5709 << ExprRange; 5710 return ExprError(); 5711 } else if (Args.size() > AdjustedNumArgs) { 5712 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5713 diag::err_typecheck_call_too_many_args) 5714 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5715 << ExprRange; 5716 return ExprError(); 5717 } 5718 5719 // Inspect the first argument of the atomic operation. 5720 Expr *Ptr = Args[0]; 5721 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5722 if (ConvertedPtr.isInvalid()) 5723 return ExprError(); 5724 5725 Ptr = ConvertedPtr.get(); 5726 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5727 if (!pointerType) { 5728 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5729 << Ptr->getType() << Ptr->getSourceRange(); 5730 return ExprError(); 5731 } 5732 5733 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
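  // (Added illustrative note: e.g. '_Atomic(int) *P' is accepted by
  // __c11_atomic_fetch_add(P, 1, __ATOMIC_SEQ_CST), whereas passing a plain
  // 'int *' is diagnosed below with err_atomic_op_needs_atomic.)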
5734 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5735 QualType ValType = AtomTy; // 'C' 5736 if (IsC11) { 5737 if (!AtomTy->isAtomicType()) { 5738 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5739 << Ptr->getType() << Ptr->getSourceRange(); 5740 return ExprError(); 5741 } 5742 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5743 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5744 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5745 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 5746 << Ptr->getSourceRange(); 5747 return ExprError(); 5748 } 5749 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5750 } else if (Form != Load && Form != LoadCopy) { 5751 if (ValType.isConstQualified()) { 5752 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5753 << Ptr->getType() << Ptr->getSourceRange(); 5754 return ExprError(); 5755 } 5756 } 5757 5758 // For an arithmetic operation, the implied arithmetic must be well-formed. 5759 if (Form == Arithmetic) { 5760 // GCC does not enforce these rules for GNU atomics, but we do to help catch 5761 // trivial type errors. 5762 auto IsAllowedValueType = [&](QualType ValType) { 5763 if (ValType->isIntegerType()) 5764 return true; 5765 if (ValType->isPointerType()) 5766 return true; 5767 if (!ValType->isFloatingType()) 5768 return false; 5769 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5770 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5771 &Context.getTargetInfo().getLongDoubleFormat() == 5772 &llvm::APFloat::x87DoubleExtended()) 5773 return false; 5774 return true; 5775 }; 5776 if (IsAddSub && !IsAllowedValueType(ValType)) { 5777 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5778 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5779 return ExprError(); 5780 } 5781 if (!IsAddSub && !ValType->isIntegerType()) { 5782 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5783 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5784 return ExprError(); 5785 } 5786 if (IsC11 && ValType->isPointerType() && 5787 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5788 diag::err_incomplete_type)) { 5789 return ExprError(); 5790 } 5791 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5792 // For __atomic_*_n operations, the value type must be a scalar integral or 5793 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5794 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5795 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5796 return ExprError(); 5797 } 5798 5799 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5800 !AtomTy->isScalarType()) { 5801 // For GNU atomics, require a trivially-copyable type. This is not part of 5802 // the GNU atomics specification but we enforce it for consistency with 5803 // other atomics which generally all require a trivially-copyable type. This 5804 // is because atomics just copy bits. 5805 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5806 << Ptr->getType() << Ptr->getSourceRange(); 5807 return ExprError(); 5808 } 5809 5810 switch (ValType.getObjCLifetime()) { 5811 case Qualifiers::OCL_None: 5812 case Qualifiers::OCL_ExplicitNone: 5813 // okay 5814 break; 5815 5816 case Qualifiers::OCL_Weak: 5817 case Qualifiers::OCL_Strong: 5818 case Qualifiers::OCL_Autoreleasing: 5819 // FIXME: Can this happen? 
By this point, ValType should be known 5820 // to be trivially copyable. 5821 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5822 << ValType << Ptr->getSourceRange(); 5823 return ExprError(); 5824 } 5825 5826 // All atomic operations have an overload which takes a pointer to a volatile 5827 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5828 // into the result or the other operands. Similarly atomic_load takes a 5829 // pointer to a const 'A'. 5830 ValType.removeLocalVolatile(); 5831 ValType.removeLocalConst(); 5832 QualType ResultType = ValType; 5833 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5834 Form == Init) 5835 ResultType = Context.VoidTy; 5836 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5837 ResultType = Context.BoolTy; 5838 5839 // The type of a parameter passed 'by value'. In the GNU atomics, such 5840 // arguments are actually passed as pointers. 5841 QualType ByValType = ValType; // 'CP' 5842 bool IsPassedByAddress = false; 5843 if (!IsC11 && !IsHIP && !IsN) { 5844 ByValType = Ptr->getType(); 5845 IsPassedByAddress = true; 5846 } 5847 5848 SmallVector<Expr *, 5> APIOrderedArgs; 5849 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5850 APIOrderedArgs.push_back(Args[0]); 5851 switch (Form) { 5852 case Init: 5853 case Load: 5854 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5855 break; 5856 case LoadCopy: 5857 case Copy: 5858 case Arithmetic: 5859 case Xchg: 5860 APIOrderedArgs.push_back(Args[2]); // Val1 5861 APIOrderedArgs.push_back(Args[1]); // Order 5862 break; 5863 case GNUXchg: 5864 APIOrderedArgs.push_back(Args[2]); // Val1 5865 APIOrderedArgs.push_back(Args[3]); // Val2 5866 APIOrderedArgs.push_back(Args[1]); // Order 5867 break; 5868 case C11CmpXchg: 5869 APIOrderedArgs.push_back(Args[2]); // Val1 5870 APIOrderedArgs.push_back(Args[4]); // Val2 5871 APIOrderedArgs.push_back(Args[1]); // Order 5872 APIOrderedArgs.push_back(Args[3]); // OrderFail 5873 break; 5874 case GNUCmpXchg: 5875 APIOrderedArgs.push_back(Args[2]); // Val1 5876 APIOrderedArgs.push_back(Args[4]); // Val2 5877 APIOrderedArgs.push_back(Args[5]); // Weak 5878 APIOrderedArgs.push_back(Args[1]); // Order 5879 APIOrderedArgs.push_back(Args[3]); // OrderFail 5880 break; 5881 } 5882 } else 5883 APIOrderedArgs.append(Args.begin(), Args.end()); 5884 5885 // The first argument's non-CV pointer type is used to deduce the type of 5886 // subsequent arguments, except for: 5887 // - weak flag (always converted to bool) 5888 // - memory order (always converted to int) 5889 // - scope (always converted to int) 5890 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5891 QualType Ty; 5892 if (i < NumVals[Form] + 1) { 5893 switch (i) { 5894 case 0: 5895 // The first argument is always a pointer. It has a fixed type. 5896 // It is always dereferenced, a nullptr is undefined. 5897 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5898 // Nothing else to do: we already know all we want about this pointer. 5899 continue; 5900 case 1: 5901 // The second argument is the non-atomic operand. For arithmetic, this 5902 // is always passed by value, and for a compare_exchange it is always 5903 // passed by address. For the rest, GNU uses by-address and C11 uses 5904 // by-value. 
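      // Illustrative (hypothetical) calls for the cases below:
      //   __c11_atomic_fetch_add(&a, 1, order)       - operand '1' passed by value
      //   __atomic_compare_exchange(&a, &expected,
      //       &desired, false, succ, fail)           - operands passed by address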
5905 assert(Form != Load); 5906 if (Form == Arithmetic && ValType->isPointerType()) 5907 Ty = Context.getPointerDiffType(); 5908 else if (Form == Init || Form == Arithmetic) 5909 Ty = ValType; 5910 else if (Form == Copy || Form == Xchg) { 5911 if (IsPassedByAddress) { 5912 // The value pointer is always dereferenced, a nullptr is undefined. 5913 CheckNonNullArgument(*this, APIOrderedArgs[i], 5914 ExprRange.getBegin()); 5915 } 5916 Ty = ByValType; 5917 } else { 5918 Expr *ValArg = APIOrderedArgs[i]; 5919 // The value pointer is always dereferenced, a nullptr is undefined. 5920 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5921 LangAS AS = LangAS::Default; 5922 // Keep address space of non-atomic pointer type. 5923 if (const PointerType *PtrTy = 5924 ValArg->getType()->getAs<PointerType>()) { 5925 AS = PtrTy->getPointeeType().getAddressSpace(); 5926 } 5927 Ty = Context.getPointerType( 5928 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5929 } 5930 break; 5931 case 2: 5932 // The third argument to compare_exchange / GNU exchange is the desired 5933 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5934 if (IsPassedByAddress) 5935 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5936 Ty = ByValType; 5937 break; 5938 case 3: 5939 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5940 Ty = Context.BoolTy; 5941 break; 5942 } 5943 } else { 5944 // The order(s) and scope are always converted to int. 5945 Ty = Context.IntTy; 5946 } 5947 5948 InitializedEntity Entity = 5949 InitializedEntity::InitializeParameter(Context, Ty, false); 5950 ExprResult Arg = APIOrderedArgs[i]; 5951 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5952 if (Arg.isInvalid()) 5953 return true; 5954 APIOrderedArgs[i] = Arg.get(); 5955 } 5956 5957 // Permute the arguments into a 'consistent' order. 5958 SmallVector<Expr*, 5> SubExprs; 5959 SubExprs.push_back(Ptr); 5960 switch (Form) { 5961 case Init: 5962 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5963 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5964 break; 5965 case Load: 5966 SubExprs.push_back(APIOrderedArgs[1]); // Order 5967 break; 5968 case LoadCopy: 5969 case Copy: 5970 case Arithmetic: 5971 case Xchg: 5972 SubExprs.push_back(APIOrderedArgs[2]); // Order 5973 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5974 break; 5975 case GNUXchg: 5976 // Note, AtomicExpr::getVal2() has a special case for this atomic. 
5977 SubExprs.push_back(APIOrderedArgs[3]); // Order 5978 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5979 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5980 break; 5981 case C11CmpXchg: 5982 SubExprs.push_back(APIOrderedArgs[3]); // Order 5983 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5984 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5985 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5986 break; 5987 case GNUCmpXchg: 5988 SubExprs.push_back(APIOrderedArgs[4]); // Order 5989 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5990 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5991 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5992 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5993 break; 5994 } 5995 5996 if (SubExprs.size() >= 2 && Form != Init) { 5997 if (Optional<llvm::APSInt> Result = 5998 SubExprs[1]->getIntegerConstantExpr(Context)) 5999 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6000 Diag(SubExprs[1]->getBeginLoc(), 6001 diag::warn_atomic_op_has_invalid_memory_order) 6002 << SubExprs[1]->getSourceRange(); 6003 } 6004 6005 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6006 auto *Scope = Args[Args.size() - 1]; 6007 if (Optional<llvm::APSInt> Result = 6008 Scope->getIntegerConstantExpr(Context)) { 6009 if (!ScopeModel->isValid(Result->getZExtValue())) 6010 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6011 << Scope->getSourceRange(); 6012 } 6013 SubExprs.push_back(Scope); 6014 } 6015 6016 AtomicExpr *AE = new (Context) 6017 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6018 6019 if ((Op == AtomicExpr::AO__c11_atomic_load || 6020 Op == AtomicExpr::AO__c11_atomic_store || 6021 Op == AtomicExpr::AO__opencl_atomic_load || 6022 Op == AtomicExpr::AO__hip_atomic_load || 6023 Op == AtomicExpr::AO__opencl_atomic_store || 6024 Op == AtomicExpr::AO__hip_atomic_store) && 6025 Context.AtomicUsesUnsupportedLibcall(AE)) 6026 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6027 << ((Op == AtomicExpr::AO__c11_atomic_load || 6028 Op == AtomicExpr::AO__opencl_atomic_load || 6029 Op == AtomicExpr::AO__hip_atomic_load) 6030 ? 0 6031 : 1); 6032 6033 if (ValType->isBitIntType()) { 6034 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6035 return ExprError(); 6036 } 6037 6038 return AE; 6039 } 6040 6041 /// checkBuiltinArgument - Given a call to a builtin function, perform 6042 /// normal type-checking on the given argument, updating the call in 6043 /// place. This is useful when a builtin function requires custom 6044 /// type-checking for some of its arguments but not necessarily all of 6045 /// them. 6046 /// 6047 /// Returns true on error. 6048 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6049 FunctionDecl *Fn = E->getDirectCallee(); 6050 assert(Fn && "builtin call without direct callee!"); 6051 6052 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6053 InitializedEntity Entity = 6054 InitializedEntity::InitializeParameter(S.Context, Param); 6055 6056 ExprResult Arg = E->getArg(0); 6057 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6058 if (Arg.isInvalid()) 6059 return true; 6060 6061 E->setArg(ArgIndex, Arg.get()); 6062 return false; 6063 } 6064 6065 /// We have a call to a function like __sync_fetch_and_add, which is an 6066 /// overloaded function based on the pointer type of its first argument. 
6067 /// The main BuildCallExpr routines have already promoted the types of 6068 /// arguments because all of these calls are prototyped as void(...). 6069 /// 6070 /// This function goes through and does final semantic checking for these 6071 /// builtins, as well as generating any warnings. 6072 ExprResult 6073 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6074 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6075 Expr *Callee = TheCall->getCallee(); 6076 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6077 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6078 6079 // Ensure that we have at least one argument to do type inference from. 6080 if (TheCall->getNumArgs() < 1) { 6081 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6082 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6083 return ExprError(); 6084 } 6085 6086 // Inspect the first argument of the atomic builtin. This should always be 6087 // a pointer type, whose element is an integral scalar or pointer type. 6088 // Because it is a pointer type, we don't have to worry about any implicit 6089 // casts here. 6090 // FIXME: We don't allow floating point scalars as input. 6091 Expr *FirstArg = TheCall->getArg(0); 6092 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6093 if (FirstArgResult.isInvalid()) 6094 return ExprError(); 6095 FirstArg = FirstArgResult.get(); 6096 TheCall->setArg(0, FirstArg); 6097 6098 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6099 if (!pointerType) { 6100 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6101 << FirstArg->getType() << FirstArg->getSourceRange(); 6102 return ExprError(); 6103 } 6104 6105 QualType ValType = pointerType->getPointeeType(); 6106 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6107 !ValType->isBlockPointerType()) { 6108 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6109 << FirstArg->getType() << FirstArg->getSourceRange(); 6110 return ExprError(); 6111 } 6112 6113 if (ValType.isConstQualified()) { 6114 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6115 << FirstArg->getType() << FirstArg->getSourceRange(); 6116 return ExprError(); 6117 } 6118 6119 switch (ValType.getObjCLifetime()) { 6120 case Qualifiers::OCL_None: 6121 case Qualifiers::OCL_ExplicitNone: 6122 // okay 6123 break; 6124 6125 case Qualifiers::OCL_Weak: 6126 case Qualifiers::OCL_Strong: 6127 case Qualifiers::OCL_Autoreleasing: 6128 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6129 << ValType << FirstArg->getSourceRange(); 6130 return ExprError(); 6131 } 6132 6133 // Strip any qualifiers off ValType. 6134 ValType = ValType.getUnqualifiedType(); 6135 6136 // The majority of builtins return a value, but a few have special return 6137 // types, so allow them to override appropriately below. 6138 QualType ResultType = ValType; 6139 6140 // We need to figure out which concrete builtin this maps onto. For example, 6141 // __sync_fetch_and_add with a 2 byte object turns into 6142 // __sync_fetch_and_add_2. 
6143 #define BUILTIN_ROW(x) \ 6144 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6145 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6146 6147 static const unsigned BuiltinIndices[][5] = { 6148 BUILTIN_ROW(__sync_fetch_and_add), 6149 BUILTIN_ROW(__sync_fetch_and_sub), 6150 BUILTIN_ROW(__sync_fetch_and_or), 6151 BUILTIN_ROW(__sync_fetch_and_and), 6152 BUILTIN_ROW(__sync_fetch_and_xor), 6153 BUILTIN_ROW(__sync_fetch_and_nand), 6154 6155 BUILTIN_ROW(__sync_add_and_fetch), 6156 BUILTIN_ROW(__sync_sub_and_fetch), 6157 BUILTIN_ROW(__sync_and_and_fetch), 6158 BUILTIN_ROW(__sync_or_and_fetch), 6159 BUILTIN_ROW(__sync_xor_and_fetch), 6160 BUILTIN_ROW(__sync_nand_and_fetch), 6161 6162 BUILTIN_ROW(__sync_val_compare_and_swap), 6163 BUILTIN_ROW(__sync_bool_compare_and_swap), 6164 BUILTIN_ROW(__sync_lock_test_and_set), 6165 BUILTIN_ROW(__sync_lock_release), 6166 BUILTIN_ROW(__sync_swap) 6167 }; 6168 #undef BUILTIN_ROW 6169 6170 // Determine the index of the size. 6171 unsigned SizeIndex; 6172 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6173 case 1: SizeIndex = 0; break; 6174 case 2: SizeIndex = 1; break; 6175 case 4: SizeIndex = 2; break; 6176 case 8: SizeIndex = 3; break; 6177 case 16: SizeIndex = 4; break; 6178 default: 6179 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6180 << FirstArg->getType() << FirstArg->getSourceRange(); 6181 return ExprError(); 6182 } 6183 6184 // Each of these builtins has one pointer argument, followed by some number of 6185 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6186 // that we ignore. Find out which row of BuiltinIndices to read from as well 6187 // as the number of fixed args. 6188 unsigned BuiltinID = FDecl->getBuiltinID(); 6189 unsigned BuiltinIndex, NumFixed = 1; 6190 bool WarnAboutSemanticsChange = false; 6191 switch (BuiltinID) { 6192 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6193 case Builtin::BI__sync_fetch_and_add: 6194 case Builtin::BI__sync_fetch_and_add_1: 6195 case Builtin::BI__sync_fetch_and_add_2: 6196 case Builtin::BI__sync_fetch_and_add_4: 6197 case Builtin::BI__sync_fetch_and_add_8: 6198 case Builtin::BI__sync_fetch_and_add_16: 6199 BuiltinIndex = 0; 6200 break; 6201 6202 case Builtin::BI__sync_fetch_and_sub: 6203 case Builtin::BI__sync_fetch_and_sub_1: 6204 case Builtin::BI__sync_fetch_and_sub_2: 6205 case Builtin::BI__sync_fetch_and_sub_4: 6206 case Builtin::BI__sync_fetch_and_sub_8: 6207 case Builtin::BI__sync_fetch_and_sub_16: 6208 BuiltinIndex = 1; 6209 break; 6210 6211 case Builtin::BI__sync_fetch_and_or: 6212 case Builtin::BI__sync_fetch_and_or_1: 6213 case Builtin::BI__sync_fetch_and_or_2: 6214 case Builtin::BI__sync_fetch_and_or_4: 6215 case Builtin::BI__sync_fetch_and_or_8: 6216 case Builtin::BI__sync_fetch_and_or_16: 6217 BuiltinIndex = 2; 6218 break; 6219 6220 case Builtin::BI__sync_fetch_and_and: 6221 case Builtin::BI__sync_fetch_and_and_1: 6222 case Builtin::BI__sync_fetch_and_and_2: 6223 case Builtin::BI__sync_fetch_and_and_4: 6224 case Builtin::BI__sync_fetch_and_and_8: 6225 case Builtin::BI__sync_fetch_and_and_16: 6226 BuiltinIndex = 3; 6227 break; 6228 6229 case Builtin::BI__sync_fetch_and_xor: 6230 case Builtin::BI__sync_fetch_and_xor_1: 6231 case Builtin::BI__sync_fetch_and_xor_2: 6232 case Builtin::BI__sync_fetch_and_xor_4: 6233 case Builtin::BI__sync_fetch_and_xor_8: 6234 case Builtin::BI__sync_fetch_and_xor_16: 6235 BuiltinIndex = 4; 6236 break; 6237 6238 case Builtin::BI__sync_fetch_and_nand: 6239 case 
Builtin::BI__sync_fetch_and_nand_1: 6240 case Builtin::BI__sync_fetch_and_nand_2: 6241 case Builtin::BI__sync_fetch_and_nand_4: 6242 case Builtin::BI__sync_fetch_and_nand_8: 6243 case Builtin::BI__sync_fetch_and_nand_16: 6244 BuiltinIndex = 5; 6245 WarnAboutSemanticsChange = true; 6246 break; 6247 6248 case Builtin::BI__sync_add_and_fetch: 6249 case Builtin::BI__sync_add_and_fetch_1: 6250 case Builtin::BI__sync_add_and_fetch_2: 6251 case Builtin::BI__sync_add_and_fetch_4: 6252 case Builtin::BI__sync_add_and_fetch_8: 6253 case Builtin::BI__sync_add_and_fetch_16: 6254 BuiltinIndex = 6; 6255 break; 6256 6257 case Builtin::BI__sync_sub_and_fetch: 6258 case Builtin::BI__sync_sub_and_fetch_1: 6259 case Builtin::BI__sync_sub_and_fetch_2: 6260 case Builtin::BI__sync_sub_and_fetch_4: 6261 case Builtin::BI__sync_sub_and_fetch_8: 6262 case Builtin::BI__sync_sub_and_fetch_16: 6263 BuiltinIndex = 7; 6264 break; 6265 6266 case Builtin::BI__sync_and_and_fetch: 6267 case Builtin::BI__sync_and_and_fetch_1: 6268 case Builtin::BI__sync_and_and_fetch_2: 6269 case Builtin::BI__sync_and_and_fetch_4: 6270 case Builtin::BI__sync_and_and_fetch_8: 6271 case Builtin::BI__sync_and_and_fetch_16: 6272 BuiltinIndex = 8; 6273 break; 6274 6275 case Builtin::BI__sync_or_and_fetch: 6276 case Builtin::BI__sync_or_and_fetch_1: 6277 case Builtin::BI__sync_or_and_fetch_2: 6278 case Builtin::BI__sync_or_and_fetch_4: 6279 case Builtin::BI__sync_or_and_fetch_8: 6280 case Builtin::BI__sync_or_and_fetch_16: 6281 BuiltinIndex = 9; 6282 break; 6283 6284 case Builtin::BI__sync_xor_and_fetch: 6285 case Builtin::BI__sync_xor_and_fetch_1: 6286 case Builtin::BI__sync_xor_and_fetch_2: 6287 case Builtin::BI__sync_xor_and_fetch_4: 6288 case Builtin::BI__sync_xor_and_fetch_8: 6289 case Builtin::BI__sync_xor_and_fetch_16: 6290 BuiltinIndex = 10; 6291 break; 6292 6293 case Builtin::BI__sync_nand_and_fetch: 6294 case Builtin::BI__sync_nand_and_fetch_1: 6295 case Builtin::BI__sync_nand_and_fetch_2: 6296 case Builtin::BI__sync_nand_and_fetch_4: 6297 case Builtin::BI__sync_nand_and_fetch_8: 6298 case Builtin::BI__sync_nand_and_fetch_16: 6299 BuiltinIndex = 11; 6300 WarnAboutSemanticsChange = true; 6301 break; 6302 6303 case Builtin::BI__sync_val_compare_and_swap: 6304 case Builtin::BI__sync_val_compare_and_swap_1: 6305 case Builtin::BI__sync_val_compare_and_swap_2: 6306 case Builtin::BI__sync_val_compare_and_swap_4: 6307 case Builtin::BI__sync_val_compare_and_swap_8: 6308 case Builtin::BI__sync_val_compare_and_swap_16: 6309 BuiltinIndex = 12; 6310 NumFixed = 2; 6311 break; 6312 6313 case Builtin::BI__sync_bool_compare_and_swap: 6314 case Builtin::BI__sync_bool_compare_and_swap_1: 6315 case Builtin::BI__sync_bool_compare_and_swap_2: 6316 case Builtin::BI__sync_bool_compare_and_swap_4: 6317 case Builtin::BI__sync_bool_compare_and_swap_8: 6318 case Builtin::BI__sync_bool_compare_and_swap_16: 6319 BuiltinIndex = 13; 6320 NumFixed = 2; 6321 ResultType = Context.BoolTy; 6322 break; 6323 6324 case Builtin::BI__sync_lock_test_and_set: 6325 case Builtin::BI__sync_lock_test_and_set_1: 6326 case Builtin::BI__sync_lock_test_and_set_2: 6327 case Builtin::BI__sync_lock_test_and_set_4: 6328 case Builtin::BI__sync_lock_test_and_set_8: 6329 case Builtin::BI__sync_lock_test_and_set_16: 6330 BuiltinIndex = 14; 6331 break; 6332 6333 case Builtin::BI__sync_lock_release: 6334 case Builtin::BI__sync_lock_release_1: 6335 case Builtin::BI__sync_lock_release_2: 6336 case Builtin::BI__sync_lock_release_4: 6337 case Builtin::BI__sync_lock_release_8: 6338 case 
Builtin::BI__sync_lock_release_16: 6339 BuiltinIndex = 15; 6340 NumFixed = 0; 6341 ResultType = Context.VoidTy; 6342 break; 6343 6344 case Builtin::BI__sync_swap: 6345 case Builtin::BI__sync_swap_1: 6346 case Builtin::BI__sync_swap_2: 6347 case Builtin::BI__sync_swap_4: 6348 case Builtin::BI__sync_swap_8: 6349 case Builtin::BI__sync_swap_16: 6350 BuiltinIndex = 16; 6351 break; 6352 } 6353 6354 // Now that we know how many fixed arguments we expect, first check that we 6355 // have at least that many. 6356 if (TheCall->getNumArgs() < 1+NumFixed) { 6357 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6358 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6359 << Callee->getSourceRange(); 6360 return ExprError(); 6361 } 6362 6363 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6364 << Callee->getSourceRange(); 6365 6366 if (WarnAboutSemanticsChange) { 6367 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6368 << Callee->getSourceRange(); 6369 } 6370 6371 // Get the decl for the concrete builtin from this, we can tell what the 6372 // concrete integer type we should convert to is. 6373 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6374 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6375 FunctionDecl *NewBuiltinDecl; 6376 if (NewBuiltinID == BuiltinID) 6377 NewBuiltinDecl = FDecl; 6378 else { 6379 // Perform builtin lookup to avoid redeclaring it. 6380 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6381 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6382 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6383 assert(Res.getFoundDecl()); 6384 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6385 if (!NewBuiltinDecl) 6386 return ExprError(); 6387 } 6388 6389 // The first argument --- the pointer --- has a fixed type; we 6390 // deduce the types of the rest of the arguments accordingly. Walk 6391 // the remaining arguments, converting them to the deduced value type. 6392 for (unsigned i = 0; i != NumFixed; ++i) { 6393 ExprResult Arg = TheCall->getArg(i+1); 6394 6395 // GCC does an implicit conversion to the pointer or integer ValType. This 6396 // can fail in some cases (1i -> int**), check for this error case now. 6397 // Initialize the argument. 6398 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6399 ValType, /*consume*/ false); 6400 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6401 if (Arg.isInvalid()) 6402 return ExprError(); 6403 6404 // Okay, we have something that *can* be converted to the right type. Check 6405 // to see if there is a potentially weird extension going on here. This can 6406 // happen when you do an atomic operation on something like an char* and 6407 // pass in 42. The 42 gets converted to char. This is even more strange 6408 // for things like 45.123 -> char, etc. 6409 // FIXME: Do this check. 6410 TheCall->setArg(i+1, Arg.get()); 6411 } 6412 6413 // Create a new DeclRefExpr to refer to the new decl. 6414 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6415 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6416 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6417 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6418 6419 // Set the callee in the CallExpr. 6420 // FIXME: This loses syntactic information. 
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so we only need to check the first argument.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin. This should always
  // be a pointer type, from which we infer the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
6488 ValType = ValType.getUnqualifiedType(); 6489 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6490 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6491 !ValType->isVectorType()) { 6492 Diag(DRE->getBeginLoc(), 6493 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6494 << PointerArg->getType() << PointerArg->getSourceRange(); 6495 return ExprError(); 6496 } 6497 6498 if (!isStore) { 6499 TheCall->setType(ValType); 6500 return TheCallResult; 6501 } 6502 6503 ExprResult ValArg = TheCall->getArg(0); 6504 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6505 Context, ValType, /*consume*/ false); 6506 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6507 if (ValArg.isInvalid()) 6508 return ExprError(); 6509 6510 TheCall->setArg(0, ValArg.get()); 6511 TheCall->setType(Context.VoidTy); 6512 return TheCallResult; 6513 } 6514 6515 /// CheckObjCString - Checks that the argument to the builtin 6516 /// CFString constructor is correct 6517 /// Note: It might also make sense to do the UTF-16 conversion here (would 6518 /// simplify the backend). 6519 bool Sema::CheckObjCString(Expr *Arg) { 6520 Arg = Arg->IgnoreParenCasts(); 6521 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6522 6523 if (!Literal || !Literal->isAscii()) { 6524 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6525 << Arg->getSourceRange(); 6526 return true; 6527 } 6528 6529 if (Literal->containsNonAsciiOrNull()) { 6530 StringRef String = Literal->getString(); 6531 unsigned NumBytes = String.size(); 6532 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6533 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6534 llvm::UTF16 *ToPtr = &ToBuf[0]; 6535 6536 llvm::ConversionResult Result = 6537 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6538 ToPtr + NumBytes, llvm::strictConversion); 6539 // Check for conversion failure. 6540 if (Result != llvm::conversionOK) 6541 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6542 << Arg->getSourceRange(); 6543 } 6544 return false; 6545 } 6546 6547 /// CheckObjCString - Checks that the format string argument to the os_log() 6548 /// and os_trace() functions is correct, and converts it to const char *. 6549 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6550 Arg = Arg->IgnoreParenCasts(); 6551 auto *Literal = dyn_cast<StringLiteral>(Arg); 6552 if (!Literal) { 6553 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6554 Literal = ObjcLiteral->getString(); 6555 } 6556 } 6557 6558 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6559 return ExprError( 6560 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6561 << Arg->getSourceRange()); 6562 } 6563 6564 ExprResult Result(Literal); 6565 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6566 InitializedEntity Entity = 6567 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6568 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6569 return Result; 6570 } 6571 6572 /// Check that the user is calling the appropriate va_start builtin for the 6573 /// target and calling convention. 
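/// For example (illustrative only): on x86-64 Linux, __builtin_ms_va_start is
/// only permitted inside a function using the Win64 calling convention (e.g.
/// __attribute__((ms_abi))), while __builtin_va_start is rejected there.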
6574 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6575 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6576 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6577 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6578 TT.getArch() == llvm::Triple::aarch64_32); 6579 bool IsWindows = TT.isOSWindows(); 6580 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6581 if (IsX64 || IsAArch64) { 6582 CallingConv CC = CC_C; 6583 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 6584 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 6585 if (IsMSVAStart) { 6586 // Don't allow this in System V ABI functions. 6587 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 6588 return S.Diag(Fn->getBeginLoc(), 6589 diag::err_ms_va_start_used_in_sysv_function); 6590 } else { 6591 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 6592 // On x64 Windows, don't allow this in System V ABI functions. 6593 // (Yes, that means there's no corresponding way to support variadic 6594 // System V ABI functions on Windows.) 6595 if ((IsWindows && CC == CC_X86_64SysV) || 6596 (!IsWindows && CC == CC_Win64)) 6597 return S.Diag(Fn->getBeginLoc(), 6598 diag::err_va_start_used_in_wrong_abi_function) 6599 << !IsWindows; 6600 } 6601 return false; 6602 } 6603 6604 if (IsMSVAStart) 6605 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 6606 return false; 6607 } 6608 6609 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 6610 ParmVarDecl **LastParam = nullptr) { 6611 // Determine whether the current function, block, or obj-c method is variadic 6612 // and get its parameter list. 6613 bool IsVariadic = false; 6614 ArrayRef<ParmVarDecl *> Params; 6615 DeclContext *Caller = S.CurContext; 6616 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 6617 IsVariadic = Block->isVariadic(); 6618 Params = Block->parameters(); 6619 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 6620 IsVariadic = FD->isVariadic(); 6621 Params = FD->parameters(); 6622 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 6623 IsVariadic = MD->isVariadic(); 6624 // FIXME: This isn't correct for methods (results in bogus warning). 6625 Params = MD->parameters(); 6626 } else if (isa<CapturedDecl>(Caller)) { 6627 // We don't support va_start in a CapturedDecl. 6628 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 6629 return true; 6630 } else { 6631 // This must be some other declcontext that parses exprs. 6632 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 6633 return true; 6634 } 6635 6636 if (!IsVariadic) { 6637 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 6638 return true; 6639 } 6640 6641 if (LastParam) 6642 *LastParam = Params.empty() ? nullptr : Params.back(); 6643 6644 return false; 6645 } 6646 6647 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 6648 /// for validity. Emit an error and return true on failure; return false 6649 /// on success. 6650 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 6651 Expr *Fn = TheCall->getCallee(); 6652 6653 if (checkVAStartABI(*this, BuiltinID, Fn)) 6654 return true; 6655 6656 if (checkArgCount(*this, TheCall, 2)) 6657 return true; 6658 6659 // Type-check the first argument normally. 6660 if (checkBuiltinArgument(*this, TheCall, 0)) 6661 return true; 6662 6663 // Check that the current function is variadic, and get its last parameter. 
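  // Illustrative (hypothetical) user code for the checks below:
  //   void f(int last, ...) {
  //     va_list ap;
  //     va_start(ap, last); // ok: 'last' is the last named parameter
  //     va_start(ap, 42);   // warns: not the last named parameter
  //   }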
6664 ParmVarDecl *LastParam; 6665 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 6666 return true; 6667 6668 // Verify that the second argument to the builtin is the last argument of the 6669 // current function or method. 6670 bool SecondArgIsLastNamedArgument = false; 6671 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 6672 6673 // These are valid if SecondArgIsLastNamedArgument is false after the next 6674 // block. 6675 QualType Type; 6676 SourceLocation ParamLoc; 6677 bool IsCRegister = false; 6678 6679 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6680 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6681 SecondArgIsLastNamedArgument = PV == LastParam; 6682 6683 Type = PV->getType(); 6684 ParamLoc = PV->getLocation(); 6685 IsCRegister = 6686 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6687 } 6688 } 6689 6690 if (!SecondArgIsLastNamedArgument) 6691 Diag(TheCall->getArg(1)->getBeginLoc(), 6692 diag::warn_second_arg_of_va_start_not_last_named_param); 6693 else if (IsCRegister || Type->isReferenceType() || 6694 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6695 // Promotable integers are UB, but enumerations need a bit of 6696 // extra checking to see what their promotable type actually is. 6697 if (!Type->isPromotableIntegerType()) 6698 return false; 6699 if (!Type->isEnumeralType()) 6700 return true; 6701 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6702 return !(ED && 6703 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6704 }()) { 6705 unsigned Reason = 0; 6706 if (Type->isReferenceType()) Reason = 1; 6707 else if (IsCRegister) Reason = 2; 6708 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6709 Diag(ParamLoc, diag::note_parameter_type) << Type; 6710 } 6711 6712 TheCall->setType(Context.VoidTy); 6713 return false; 6714 } 6715 6716 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6717 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 6718 const LangOptions &LO = getLangOpts(); 6719 6720 if (LO.CPlusPlus) 6721 return Arg->getType() 6722 .getCanonicalType() 6723 .getTypePtr() 6724 ->getPointeeType() 6725 .withoutLocalFastQualifiers() == Context.CharTy; 6726 6727 // In C, allow aliasing through `char *`, this is required for AArch64 at 6728 // least. 6729 return true; 6730 }; 6731 6732 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6733 // const char *named_addr); 6734 6735 Expr *Func = Call->getCallee(); 6736 6737 if (Call->getNumArgs() < 3) 6738 return Diag(Call->getEndLoc(), 6739 diag::err_typecheck_call_too_few_args_at_least) 6740 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6741 6742 // Type-check the first argument normally. 6743 if (checkBuiltinArgument(*this, Call, 0)) 6744 return true; 6745 6746 // Check that the current function is variadic. 
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers.

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything. We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int. Try all of those.
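  // Illustrative (hypothetical) calls:
  //   __builtin_isnan(x)                                  - NumArgs == 1
  //   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
  //                        FP_SUBNORMAL, FP_ZERO, x)      - NumArgs == 6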
6825 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6826 Expr *Arg = TheCall->getArg(i); 6827 6828 if (Arg->isTypeDependent()) 6829 return false; 6830 6831 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6832 6833 if (Res.isInvalid()) 6834 return true; 6835 TheCall->setArg(i, Res.get()); 6836 } 6837 6838 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6839 6840 if (OrigArg->isTypeDependent()) 6841 return false; 6842 6843 // Usual Unary Conversions will convert half to float, which we want for 6844 // machines that use fp16 conversion intrinsics. Else, we wnat to leave the 6845 // type how it is, but do normal L->Rvalue conversions. 6846 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6847 OrigArg = UsualUnaryConversions(OrigArg).get(); 6848 else 6849 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6850 TheCall->setArg(NumArgs - 1, OrigArg); 6851 6852 // This operation requires a non-_Complex floating-point number. 6853 if (!OrigArg->getType()->isRealFloatingType()) 6854 return Diag(OrigArg->getBeginLoc(), 6855 diag::err_typecheck_call_invalid_unary_fp) 6856 << OrigArg->getType() << OrigArg->getSourceRange(); 6857 6858 return false; 6859 } 6860 6861 /// Perform semantic analysis for a call to __builtin_complex. 6862 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6863 if (checkArgCount(*this, TheCall, 2)) 6864 return true; 6865 6866 bool Dependent = false; 6867 for (unsigned I = 0; I != 2; ++I) { 6868 Expr *Arg = TheCall->getArg(I); 6869 QualType T = Arg->getType(); 6870 if (T->isDependentType()) { 6871 Dependent = true; 6872 continue; 6873 } 6874 6875 // Despite supporting _Complex int, GCC requires a real floating point type 6876 // for the operands of __builtin_complex. 6877 if (!T->isRealFloatingType()) { 6878 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6879 << Arg->getType() << Arg->getSourceRange(); 6880 } 6881 6882 ExprResult Converted = DefaultLvalueConversion(Arg); 6883 if (Converted.isInvalid()) 6884 return true; 6885 TheCall->setArg(I, Converted.get()); 6886 } 6887 6888 if (Dependent) { 6889 TheCall->setType(Context.DependentTy); 6890 return false; 6891 } 6892 6893 Expr *Real = TheCall->getArg(0); 6894 Expr *Imag = TheCall->getArg(1); 6895 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6896 return Diag(Real->getBeginLoc(), 6897 diag::err_typecheck_call_different_arg_types) 6898 << Real->getType() << Imag->getType() 6899 << Real->getSourceRange() << Imag->getSourceRange(); 6900 } 6901 6902 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6903 // don't allow this builtin to form those types either. 6904 // FIXME: Should we allow these types? 6905 if (Real->getType()->isFloat16Type()) 6906 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6907 << "_Float16"; 6908 if (Real->getType()->isHalfType()) 6909 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6910 << "half"; 6911 6912 TheCall->setType(Context.getComplexType(Real->getType())); 6913 return false; 6914 } 6915 6916 // Customized Sema Checking for VSX builtins that have the following signature: 6917 // vector [...] builtinName(vector [...], vector [...], const int); 6918 // Which takes the same type of vectors (any legal vector type) for the first 6919 // two arguments and takes compile time constant for the third argument. 
6920 // Example builtins are : 6921 // vector double vec_xxpermdi(vector double, vector double, int); 6922 // vector short vec_xxsldwi(vector short, vector short, int); 6923 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6924 unsigned ExpectedNumArgs = 3; 6925 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6926 return true; 6927 6928 // Check the third argument is a compile time constant 6929 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6930 return Diag(TheCall->getBeginLoc(), 6931 diag::err_vsx_builtin_nonconstant_argument) 6932 << 3 /* argument index */ << TheCall->getDirectCallee() 6933 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6934 TheCall->getArg(2)->getEndLoc()); 6935 6936 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6937 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6938 6939 // Check the type of argument 1 and argument 2 are vectors. 6940 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6941 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6942 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6943 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6944 << TheCall->getDirectCallee() 6945 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6946 TheCall->getArg(1)->getEndLoc()); 6947 } 6948 6949 // Check the first two arguments are the same type. 6950 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6951 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6952 << TheCall->getDirectCallee() 6953 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6954 TheCall->getArg(1)->getEndLoc()); 6955 } 6956 6957 // When default clang type checking is turned off and the customized type 6958 // checking is used, the returning type of the function must be explicitly 6959 // set. Otherwise it is _Bool by default. 6960 TheCall->setType(Arg1Ty); 6961 6962 return false; 6963 } 6964 6965 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6966 // This is declared to take (...), so we have to check everything. 6967 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6968 if (TheCall->getNumArgs() < 2) 6969 return ExprError(Diag(TheCall->getEndLoc(), 6970 diag::err_typecheck_call_too_few_args_at_least) 6971 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6972 << TheCall->getSourceRange()); 6973 6974 // Determine which of the following types of shufflevector we're checking: 6975 // 1) unary, vector mask: (lhs, mask) 6976 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6977 QualType resType = TheCall->getArg(0)->getType(); 6978 unsigned numElements = 0; 6979 6980 if (!TheCall->getArg(0)->isTypeDependent() && 6981 !TheCall->getArg(1)->isTypeDependent()) { 6982 QualType LHSType = TheCall->getArg(0)->getType(); 6983 QualType RHSType = TheCall->getArg(1)->getType(); 6984 6985 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6986 return ExprError( 6987 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6988 << TheCall->getDirectCallee() 6989 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6990 TheCall->getArg(1)->getEndLoc())); 6991 6992 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6993 unsigned numResElements = TheCall->getNumArgs() - 2; 6994 6995 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6996 // with mask. If so, verify that RHS is an integer vector type with the 6997 // same number of elts as lhs. 
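    // Illustrative (hypothetical) calls for the two forms:
    //   __builtin_shufflevector(v, mask)           - unary, integer vector mask
    //   __builtin_shufflevector(a, b, 0, 4, 1, 5)  - binary, constant indices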
6998 if (TheCall->getNumArgs() == 2) { 6999 if (!RHSType->hasIntegerRepresentation() || 7000 RHSType->castAs<VectorType>()->getNumElements() != numElements) 7001 return ExprError(Diag(TheCall->getBeginLoc(), 7002 diag::err_vec_builtin_incompatible_vector) 7003 << TheCall->getDirectCallee() 7004 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 7005 TheCall->getArg(1)->getEndLoc())); 7006 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 7007 return ExprError(Diag(TheCall->getBeginLoc(), 7008 diag::err_vec_builtin_incompatible_vector) 7009 << TheCall->getDirectCallee() 7010 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7011 TheCall->getArg(1)->getEndLoc())); 7012 } else if (numElements != numResElements) { 7013 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 7014 resType = Context.getVectorType(eltType, numResElements, 7015 VectorType::GenericVector); 7016 } 7017 } 7018 7019 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 7020 if (TheCall->getArg(i)->isTypeDependent() || 7021 TheCall->getArg(i)->isValueDependent()) 7022 continue; 7023 7024 Optional<llvm::APSInt> Result; 7025 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 7026 return ExprError(Diag(TheCall->getBeginLoc(), 7027 diag::err_shufflevector_nonconstant_argument) 7028 << TheCall->getArg(i)->getSourceRange()); 7029 7030 // Allow -1 which will be translated to undef in the IR. 7031 if (Result->isSigned() && Result->isAllOnes()) 7032 continue; 7033 7034 if (Result->getActiveBits() > 64 || 7035 Result->getZExtValue() >= numElements * 2) 7036 return ExprError(Diag(TheCall->getBeginLoc(), 7037 diag::err_shufflevector_argument_too_large) 7038 << TheCall->getArg(i)->getSourceRange()); 7039 } 7040 7041 SmallVector<Expr*, 32> exprs; 7042 7043 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 7044 exprs.push_back(TheCall->getArg(i)); 7045 TheCall->setArg(i, nullptr); 7046 } 7047 7048 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 7049 TheCall->getCallee()->getBeginLoc(), 7050 TheCall->getRParenLoc()); 7051 } 7052 7053 /// SemaConvertVectorExpr - Handle __builtin_convertvector 7054 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 7055 SourceLocation BuiltinLoc, 7056 SourceLocation RParenLoc) { 7057 ExprValueKind VK = VK_PRValue; 7058 ExprObjectKind OK = OK_Ordinary; 7059 QualType DstTy = TInfo->getType(); 7060 QualType SrcTy = E->getType(); 7061 7062 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 7063 return ExprError(Diag(BuiltinLoc, 7064 diag::err_convertvector_non_vector) 7065 << E->getSourceRange()); 7066 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 7067 return ExprError(Diag(BuiltinLoc, 7068 diag::err_convertvector_non_vector_type)); 7069 7070 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 7071 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 7072 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 7073 if (SrcElts != DstElts) 7074 return ExprError(Diag(BuiltinLoc, 7075 diag::err_convertvector_incompatible_vector) 7076 << E->getSourceRange()); 7077 } 7078 7079 return new (Context) 7080 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 7081 } 7082 7083 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 7084 // This is declared to take (const void*, ...) and can take two 7085 // optional constant int args. 
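// Illustrative (hypothetical) calls:
//   __builtin_prefetch(p)        - ok, rw and locality use their defaults
//   __builtin_prefetch(p, 1, 3)  - ok: rw in [0,1], locality in [0,3]
//   __builtin_prefetch(p, 2, 3)  - diagnosed: rw out of range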
7086 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7087 unsigned NumArgs = TheCall->getNumArgs(); 7088 7089 if (NumArgs > 3) 7090 return Diag(TheCall->getEndLoc(), 7091 diag::err_typecheck_call_too_many_args_at_most) 7092 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7093 7094 // Argument 0 is checked for us and the remaining arguments must be 7095 // constant integers. 7096 for (unsigned i = 1; i != NumArgs; ++i) 7097 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7098 return true; 7099 7100 return false; 7101 } 7102 7103 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7104 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7105 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7106 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7107 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7108 if (checkArgCount(*this, TheCall, 1)) 7109 return true; 7110 Expr *Arg = TheCall->getArg(0); 7111 if (Arg->isInstantiationDependent()) 7112 return false; 7113 7114 QualType ArgTy = Arg->getType(); 7115 if (!ArgTy->hasFloatingRepresentation()) 7116 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7117 << ArgTy; 7118 if (Arg->isLValue()) { 7119 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7120 TheCall->setArg(0, FirstArg.get()); 7121 } 7122 TheCall->setType(TheCall->getArg(0)->getType()); 7123 return false; 7124 } 7125 7126 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7127 // __assume does not evaluate its arguments, and should warn if its argument 7128 // has side effects. 7129 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7130 Expr *Arg = TheCall->getArg(0); 7131 if (Arg->isInstantiationDependent()) return false; 7132 7133 if (Arg->HasSideEffects(Context)) 7134 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7135 << Arg->getSourceRange() 7136 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7137 7138 return false; 7139 } 7140 7141 /// Handle __builtin_alloca_with_align. This is declared 7142 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7143 /// than 8. 7144 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7145 // The alignment must be a constant integer. 7146 Expr *Arg = TheCall->getArg(1); 7147 7148 // We can't check the value of a dependent argument. 7149 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7150 if (const auto *UE = 7151 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7152 if (UE->getKind() == UETT_AlignOf || 7153 UE->getKind() == UETT_PreferredAlignOf) 7154 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7155 << Arg->getSourceRange(); 7156 7157 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7158 7159 if (!Result.isPowerOf2()) 7160 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7161 << Arg->getSourceRange(); 7162 7163 if (Result < Context.getCharWidth()) 7164 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7165 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7166 7167 if (Result > std::numeric_limits<int32_t>::max()) 7168 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7169 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7170 } 7171 7172 return false; 7173 } 7174 7175 /// Handle __builtin_assume_aligned. This is declared 7176 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
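/// For example (illustrative only):
///   p = __builtin_assume_aligned(p, 64);     // ok: power-of-2 alignment
///   p = __builtin_assume_aligned(p, 64, 8);  // ok: with a byte offset
///   p = __builtin_assume_aligned(p, 48);     // rejected: not a power of 2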
7177 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 7178 unsigned NumArgs = TheCall->getNumArgs(); 7179 7180 if (NumArgs > 3) 7181 return Diag(TheCall->getEndLoc(), 7182 diag::err_typecheck_call_too_many_args_at_most) 7183 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7184 7185 // The alignment must be a constant integer. 7186 Expr *Arg = TheCall->getArg(1); 7187 7188 // We can't check the value of a dependent argument. 7189 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7190 llvm::APSInt Result; 7191 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7192 return true; 7193 7194 if (!Result.isPowerOf2()) 7195 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7196 << Arg->getSourceRange(); 7197 7198 if (Result > Sema::MaximumAlignment) 7199 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 7200 << Arg->getSourceRange() << Sema::MaximumAlignment; 7201 } 7202 7203 if (NumArgs > 2) { 7204 ExprResult Arg(TheCall->getArg(2)); 7205 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7206 Context.getSizeType(), false); 7207 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7208 if (Arg.isInvalid()) return true; 7209 TheCall->setArg(2, Arg.get()); 7210 } 7211 7212 return false; 7213 } 7214 7215 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 7216 unsigned BuiltinID = 7217 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 7218 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 7219 7220 unsigned NumArgs = TheCall->getNumArgs(); 7221 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 7222 if (NumArgs < NumRequiredArgs) { 7223 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 7224 << 0 /* function call */ << NumRequiredArgs << NumArgs 7225 << TheCall->getSourceRange(); 7226 } 7227 if (NumArgs >= NumRequiredArgs + 0x100) { 7228 return Diag(TheCall->getEndLoc(), 7229 diag::err_typecheck_call_too_many_args_at_most) 7230 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 7231 << TheCall->getSourceRange(); 7232 } 7233 unsigned i = 0; 7234 7235 // For formatting call, check buffer arg. 7236 if (!IsSizeCall) { 7237 ExprResult Arg(TheCall->getArg(i)); 7238 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7239 Context, Context.VoidPtrTy, false); 7240 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7241 if (Arg.isInvalid()) 7242 return true; 7243 TheCall->setArg(i, Arg.get()); 7244 i++; 7245 } 7246 7247 // Check string literal arg. 7248 unsigned FormatIdx = i; 7249 { 7250 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 7251 if (Arg.isInvalid()) 7252 return true; 7253 TheCall->setArg(i, Arg.get()); 7254 i++; 7255 } 7256 7257 // Make sure variadic args are scalar. 7258 unsigned FirstDataArg = i; 7259 while (i < NumArgs) { 7260 ExprResult Arg = DefaultVariadicArgumentPromotion( 7261 TheCall->getArg(i), VariadicFunction, nullptr); 7262 if (Arg.isInvalid()) 7263 return true; 7264 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7265 if (ArgSize.getQuantity() >= 0x100) { 7266 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7267 << i << (int)ArgSize.getQuantity() << 0xff 7268 << TheCall->getSourceRange(); 7269 } 7270 TheCall->setArg(i, Arg.get()); 7271 i++; 7272 } 7273 7274 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7275 // call to avoid duplicate diagnostics. 
7276 if (!IsSizeCall) { 7277 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 7278 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 7279 bool Success = CheckFormatArguments( 7280 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 7281 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 7282 CheckedVarArgs); 7283 if (!Success) 7284 return true; 7285 } 7286 7287 if (IsSizeCall) { 7288 TheCall->setType(Context.getSizeType()); 7289 } else { 7290 TheCall->setType(Context.VoidPtrTy); 7291 } 7292 return false; 7293 } 7294 7295 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 7296 /// TheCall is a constant expression. 7297 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 7298 llvm::APSInt &Result) { 7299 Expr *Arg = TheCall->getArg(ArgNum); 7300 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7301 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7302 7303 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 7304 7305 Optional<llvm::APSInt> R; 7306 if (!(R = Arg->getIntegerConstantExpr(Context))) 7307 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 7308 << FDecl->getDeclName() << Arg->getSourceRange(); 7309 Result = *R; 7310 return false; 7311 } 7312 7313 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 7314 /// TheCall is a constant expression in the range [Low, High]. 7315 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 7316 int Low, int High, bool RangeIsError) { 7317 if (isConstantEvaluated()) 7318 return false; 7319 llvm::APSInt Result; 7320 7321 // We can't check the value of a dependent argument. 7322 Expr *Arg = TheCall->getArg(ArgNum); 7323 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7324 return false; 7325 7326 // Check constant-ness first. 7327 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7328 return true; 7329 7330 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 7331 if (RangeIsError) 7332 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 7333 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 7334 else 7335 // Defer the warning until we know if the code will be emitted so that 7336 // dead code can ignore this. 7337 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 7338 PDiag(diag::warn_argument_invalid_range) 7339 << toString(Result, 10) << Low << High 7340 << Arg->getSourceRange()); 7341 } 7342 7343 return false; 7344 } 7345 7346 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 7347 /// TheCall is a constant expression is a multiple of Num.. 7348 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 7349 unsigned Num) { 7350 llvm::APSInt Result; 7351 7352 // We can't check the value of a dependent argument. 7353 Expr *Arg = TheCall->getArg(ArgNum); 7354 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7355 return false; 7356 7357 // Check constant-ness first. 7358 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7359 return true; 7360 7361 if (Result.getSExtValue() % Num != 0) 7362 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 7363 << Num << Arg->getSourceRange(); 7364 7365 return false; 7366 } 7367 7368 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 7369 /// constant expression representing a power of 2. 
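/// For example, 1, 2, 4, and 8 are accepted, while 0, 3, and 6 are rejected
/// (the value must be strictly positive and have a single bit set).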
7370 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7371 llvm::APSInt Result; 7372 7373 // We can't check the value of a dependent argument. 7374 Expr *Arg = TheCall->getArg(ArgNum); 7375 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7376 return false; 7377 7378 // Check constant-ness first. 7379 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7380 return true; 7381 7382 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7383 // and only if x is a power of 2. 7384 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7385 return false; 7386 7387 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7388 << Arg->getSourceRange(); 7389 } 7390 7391 static bool IsShiftedByte(llvm::APSInt Value) { 7392 if (Value.isNegative()) 7393 return false; 7394 7395 // Check if it's a shifted byte, by shifting it down 7396 while (true) { 7397 // If the value fits in the bottom byte, the check passes. 7398 if (Value < 0x100) 7399 return true; 7400 7401 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7402 // fails. 7403 if ((Value & 0xFF) != 0) 7404 return false; 7405 7406 // If the bottom 8 bits are all 0, but something above that is nonzero, 7407 // then shifting the value right by 8 bits won't affect whether it's a 7408 // shifted byte or not. So do that, and go round again. 7409 Value >>= 8; 7410 } 7411 } 7412 7413 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7414 /// a constant expression representing an arbitrary byte value shifted left by 7415 /// a multiple of 8 bits. 7416 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7417 unsigned ArgBits) { 7418 llvm::APSInt Result; 7419 7420 // We can't check the value of a dependent argument. 7421 Expr *Arg = TheCall->getArg(ArgNum); 7422 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7423 return false; 7424 7425 // Check constant-ness first. 7426 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7427 return true; 7428 7429 // Truncate to the given size. 7430 Result = Result.getLoBits(ArgBits); 7431 Result.setIsUnsigned(true); 7432 7433 if (IsShiftedByte(Result)) 7434 return false; 7435 7436 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7437 << Arg->getSourceRange(); 7438 } 7439 7440 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7441 /// TheCall is a constant expression representing either a shifted byte value, 7442 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7443 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7444 /// Arm MVE intrinsics. 7445 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7446 int ArgNum, 7447 unsigned ArgBits) { 7448 llvm::APSInt Result; 7449 7450 // We can't check the value of a dependent argument. 7451 Expr *Arg = TheCall->getArg(ArgNum); 7452 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7453 return false; 7454 7455 // Check constant-ness first. 7456 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7457 return true; 7458 7459 // Truncate to the given size. 7460 Result = Result.getLoBits(ArgBits); 7461 Result.setIsUnsigned(true); 7462 7463 // Check to see if it's in either of the required forms. 
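  // Editorial illustration: values such as 0x42, 0x4200 or 0xAB0000 pass the
  // shifted-byte test, and values such as 0x01FF or 0xC3FF match the 0x??FF
  // form; something like 0x1234 matches neither and is diagnosed below.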
7464 if (IsShiftedByte(Result) || 7465 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7466 return false; 7467 7468 return Diag(TheCall->getBeginLoc(), 7469 diag::err_argument_not_shifted_byte_or_xxff) 7470 << Arg->getSourceRange(); 7471 } 7472 7473 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7474 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7475 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7476 if (checkArgCount(*this, TheCall, 2)) 7477 return true; 7478 Expr *Arg0 = TheCall->getArg(0); 7479 Expr *Arg1 = TheCall->getArg(1); 7480 7481 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7482 if (FirstArg.isInvalid()) 7483 return true; 7484 QualType FirstArgType = FirstArg.get()->getType(); 7485 if (!FirstArgType->isAnyPointerType()) 7486 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7487 << "first" << FirstArgType << Arg0->getSourceRange(); 7488 TheCall->setArg(0, FirstArg.get()); 7489 7490 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7491 if (SecArg.isInvalid()) 7492 return true; 7493 QualType SecArgType = SecArg.get()->getType(); 7494 if (!SecArgType->isIntegerType()) 7495 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7496 << "second" << SecArgType << Arg1->getSourceRange(); 7497 7498 // Derive the return type from the pointer argument. 7499 TheCall->setType(FirstArgType); 7500 return false; 7501 } 7502 7503 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7504 if (checkArgCount(*this, TheCall, 2)) 7505 return true; 7506 7507 Expr *Arg0 = TheCall->getArg(0); 7508 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7509 if (FirstArg.isInvalid()) 7510 return true; 7511 QualType FirstArgType = FirstArg.get()->getType(); 7512 if (!FirstArgType->isAnyPointerType()) 7513 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7514 << "first" << FirstArgType << Arg0->getSourceRange(); 7515 TheCall->setArg(0, FirstArg.get()); 7516 7517 // Derive the return type from the pointer argument. 
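    // Editorial illustration (hypothetical user code): given 'int *p', a call
    // like '__builtin_arm_addg(p, 3)' keeps the pointer type of 'p' as its
    // result type, and the tag offset operand is checked below to be a
    // constant in [0,15].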
  TheCall->setType(FirstArgType);

    // Second arg must be a constant in range [0,15].
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
             << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
             << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&](Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull);
    };

    // Each argument should be either a pointer or a null pointer constant.
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
             << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure the pointee types are compatible.
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_sub_ptr_compatible)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
               << ArgB->getSourceRange();
      }
    }

    // At least one argument should be a pointer type.
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
             << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here, but if the string is of one of the forms described in ACLE
  // then we can check that the supplied fields are integers and within the
  // valid ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      ValidString &= Fields[0].startswith_insensitive("cp") ||
                     Fields[0].startswith_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].startswith_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ?
1 : 15, 7, 15, 15, 7}); 7694 else 7695 Ranges.append({15, 7, 15}); 7696 7697 for (unsigned i=0; i<Fields.size(); ++i) { 7698 int IntField; 7699 ValidString &= !Fields[i].getAsInteger(10, IntField); 7700 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 7701 } 7702 7703 if (!ValidString) 7704 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7705 << Arg->getSourceRange(); 7706 } else if (IsAArch64Builtin && Fields.size() == 1) { 7707 // If the register name is one of those that appear in the condition below 7708 // and the special register builtin being used is one of the write builtins, 7709 // then we require that the argument provided for writing to the register 7710 // is an integer constant expression. This is because it will be lowered to 7711 // an MSR (immediate) instruction, so we need to know the immediate at 7712 // compile time. 7713 if (TheCall->getNumArgs() != 2) 7714 return false; 7715 7716 std::string RegLower = Reg.lower(); 7717 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7718 RegLower != "pan" && RegLower != "uao") 7719 return false; 7720 7721 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7722 } 7723 7724 return false; 7725 } 7726 7727 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7728 /// Emit an error and return true on failure; return false on success. 7729 /// TypeStr is a string containing the type descriptor of the value returned by 7730 /// the builtin and the descriptors of the expected type of the arguments. 7731 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID, 7732 const char *TypeStr) { 7733 7734 assert((TypeStr[0] != '\0') && 7735 "Invalid types in PPC MMA builtin declaration"); 7736 7737 switch (BuiltinID) { 7738 default: 7739 // This function is called in CheckPPCBuiltinFunctionCall where the 7740 // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here 7741 // we are isolating the pair vector memop builtins that can be used with mma 7742 // off so the default case is every builtin that requires mma and paired 7743 // vector memops. 7744 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7745 diag::err_ppc_builtin_only_on_arch, "10") || 7746 SemaFeatureCheck(*this, TheCall, "mma", 7747 diag::err_ppc_builtin_only_on_arch, "10")) 7748 return true; 7749 break; 7750 case PPC::BI__builtin_vsx_lxvp: 7751 case PPC::BI__builtin_vsx_stxvp: 7752 case PPC::BI__builtin_vsx_assemble_pair: 7753 case PPC::BI__builtin_vsx_disassemble_pair: 7754 if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops", 7755 diag::err_ppc_builtin_only_on_arch, "10")) 7756 return true; 7757 break; 7758 } 7759 7760 unsigned Mask = 0; 7761 unsigned ArgNum = 0; 7762 7763 // The first type in TypeStr is the type of the value returned by the 7764 // builtin. So we first read that type and change the type of TheCall. 7765 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7766 TheCall->setType(type); 7767 7768 while (*TypeStr != '\0') { 7769 Mask = 0; 7770 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7771 if (ArgNum >= TheCall->getNumArgs()) { 7772 ArgNum++; 7773 break; 7774 } 7775 7776 Expr *Arg = TheCall->getArg(ArgNum); 7777 QualType PassedType = Arg->getType(); 7778 QualType StrippedRVType = PassedType.getCanonicalType(); 7779 7780 // Strip Restrict/Volatile qualifiers. 
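    // Editorial illustration: an argument declared as
    // '__vector_quad *__restrict Acc' is compared below as plain
    // '__vector_quad *' once the top-level qualifiers have been dropped.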
7781 if (StrippedRVType.isRestrictQualified() || 7782 StrippedRVType.isVolatileQualified()) 7783 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType(); 7784 7785 // The only case where the argument type and expected type are allowed to 7786 // mismatch is if the argument type is a non-void pointer (or array) and 7787 // expected type is a void pointer. 7788 if (StrippedRVType != ExpectedType) 7789 if (!(ExpectedType->isVoidPointerType() && 7790 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType()))) 7791 return Diag(Arg->getBeginLoc(), 7792 diag::err_typecheck_convert_incompatible) 7793 << PassedType << ExpectedType << 1 << 0 << 0; 7794 7795 // If the value of the Mask is not 0, we have a constraint in the size of 7796 // the integer argument so here we ensure the argument is a constant that 7797 // is in the valid range. 7798 if (Mask != 0 && 7799 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7800 return true; 7801 7802 ArgNum++; 7803 } 7804 7805 // In case we exited early from the previous loop, there are other types to 7806 // read from TypeStr. So we need to read them all to ensure we have the right 7807 // number of arguments in TheCall and if it is not the case, to display a 7808 // better error message. 7809 while (*TypeStr != '\0') { 7810 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7811 ArgNum++; 7812 } 7813 if (checkArgCount(*this, TheCall, ArgNum)) 7814 return true; 7815 7816 return false; 7817 } 7818 7819 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 7820 /// This checks that the target supports __builtin_longjmp and 7821 /// that val is a constant 1. 7822 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7823 if (!Context.getTargetInfo().hasSjLjLowering()) 7824 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7825 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7826 7827 Expr *Arg = TheCall->getArg(1); 7828 llvm::APSInt Result; 7829 7830 // TODO: This is less than ideal. Overload this to take a value. 7831 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7832 return true; 7833 7834 if (Result != 1) 7835 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7836 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7837 7838 return false; 7839 } 7840 7841 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 7842 /// This checks that the target supports __builtin_setjmp. 7843 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7844 if (!Context.getTargetInfo().hasSjLjLowering()) 7845 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7846 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7847 return false; 7848 } 7849 7850 namespace { 7851 7852 class UncoveredArgHandler { 7853 enum { Unknown = -1, AllCovered = -2 }; 7854 7855 signed FirstUncoveredArg = Unknown; 7856 SmallVector<const Expr *, 4> DiagnosticExprs; 7857 7858 public: 7859 UncoveredArgHandler() = default; 7860 7861 bool hasUncoveredArg() const { 7862 return (FirstUncoveredArg >= 0); 7863 } 7864 7865 unsigned getUncoveredArg() const { 7866 assert(hasUncoveredArg() && "no uncovered argument"); 7867 return FirstUncoveredArg; 7868 } 7869 7870 void setAllCovered() { 7871 // A string has been found with all arguments covered, so clear out 7872 // the diagnostics. 
7873 DiagnosticExprs.clear(); 7874 FirstUncoveredArg = AllCovered; 7875 } 7876 7877 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7878 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7879 7880 // Don't update if a previous string covers all arguments. 7881 if (FirstUncoveredArg == AllCovered) 7882 return; 7883 7884 // UncoveredArgHandler tracks the highest uncovered argument index 7885 // and with it all the strings that match this index. 7886 if (NewFirstUncoveredArg == FirstUncoveredArg) 7887 DiagnosticExprs.push_back(StrExpr); 7888 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7889 DiagnosticExprs.clear(); 7890 DiagnosticExprs.push_back(StrExpr); 7891 FirstUncoveredArg = NewFirstUncoveredArg; 7892 } 7893 } 7894 7895 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7896 }; 7897 7898 enum StringLiteralCheckType { 7899 SLCT_NotALiteral, 7900 SLCT_UncheckedLiteral, 7901 SLCT_CheckedLiteral 7902 }; 7903 7904 } // namespace 7905 7906 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7907 BinaryOperatorKind BinOpKind, 7908 bool AddendIsRight) { 7909 unsigned BitWidth = Offset.getBitWidth(); 7910 unsigned AddendBitWidth = Addend.getBitWidth(); 7911 // There might be negative interim results. 7912 if (Addend.isUnsigned()) { 7913 Addend = Addend.zext(++AddendBitWidth); 7914 Addend.setIsSigned(true); 7915 } 7916 // Adjust the bit width of the APSInts. 7917 if (AddendBitWidth > BitWidth) { 7918 Offset = Offset.sext(AddendBitWidth); 7919 BitWidth = AddendBitWidth; 7920 } else if (BitWidth > AddendBitWidth) { 7921 Addend = Addend.sext(BitWidth); 7922 } 7923 7924 bool Ov = false; 7925 llvm::APSInt ResOffset = Offset; 7926 if (BinOpKind == BO_Add) 7927 ResOffset = Offset.sadd_ov(Addend, Ov); 7928 else { 7929 assert(AddendIsRight && BinOpKind == BO_Sub && 7930 "operator must be add or sub with addend on the right"); 7931 ResOffset = Offset.ssub_ov(Addend, Ov); 7932 } 7933 7934 // We add an offset to a pointer here so we should support an offset as big as 7935 // possible. 7936 if (Ov) { 7937 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7938 "index (intermediate) result too big"); 7939 Offset = Offset.sext(2 * BitWidth); 7940 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7941 return; 7942 } 7943 7944 Offset = ResOffset; 7945 } 7946 7947 namespace { 7948 7949 // This is a wrapper class around StringLiteral to support offsetted string 7950 // literals as format strings. It takes the offset into account when returning 7951 // the string and its length or the source locations to display notes correctly. 
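// Editorial illustration: for FExpr = "%i %s" with Offset = 3, getString()
// below yields "%s" and the byte/character lengths shrink accordingly, so the
// format checker only ever sees the tail of the literal.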
7952 class FormatStringLiteral { 7953 const StringLiteral *FExpr; 7954 int64_t Offset; 7955 7956 public: 7957 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7958 : FExpr(fexpr), Offset(Offset) {} 7959 7960 StringRef getString() const { 7961 return FExpr->getString().drop_front(Offset); 7962 } 7963 7964 unsigned getByteLength() const { 7965 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7966 } 7967 7968 unsigned getLength() const { return FExpr->getLength() - Offset; } 7969 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7970 7971 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7972 7973 QualType getType() const { return FExpr->getType(); } 7974 7975 bool isAscii() const { return FExpr->isAscii(); } 7976 bool isWide() const { return FExpr->isWide(); } 7977 bool isUTF8() const { return FExpr->isUTF8(); } 7978 bool isUTF16() const { return FExpr->isUTF16(); } 7979 bool isUTF32() const { return FExpr->isUTF32(); } 7980 bool isPascal() const { return FExpr->isPascal(); } 7981 7982 SourceLocation getLocationOfByte( 7983 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7984 const TargetInfo &Target, unsigned *StartToken = nullptr, 7985 unsigned *StartTokenByteOffset = nullptr) const { 7986 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7987 StartToken, StartTokenByteOffset); 7988 } 7989 7990 SourceLocation getBeginLoc() const LLVM_READONLY { 7991 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7992 } 7993 7994 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7995 }; 7996 7997 } // namespace 7998 7999 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 8000 const Expr *OrigFormatExpr, 8001 ArrayRef<const Expr *> Args, 8002 bool HasVAListArg, unsigned format_idx, 8003 unsigned firstDataArg, 8004 Sema::FormatStringType Type, 8005 bool inFunctionCall, 8006 Sema::VariadicCallType CallType, 8007 llvm::SmallBitVector &CheckedVarArgs, 8008 UncoveredArgHandler &UncoveredArg, 8009 bool IgnoreStringsWithoutSpecifiers); 8010 8011 // Determine if an expression is a string literal or constant string. 8012 // If this function returns false on the arguments to a function expecting a 8013 // format string, we will usually need to emit a warning. 8014 // True string literals are then checked by CheckFormatString. 8015 static StringLiteralCheckType 8016 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 8017 bool HasVAListArg, unsigned format_idx, 8018 unsigned firstDataArg, Sema::FormatStringType Type, 8019 Sema::VariadicCallType CallType, bool InFunctionCall, 8020 llvm::SmallBitVector &CheckedVarArgs, 8021 UncoveredArgHandler &UncoveredArg, 8022 llvm::APSInt Offset, 8023 bool IgnoreStringsWithoutSpecifiers = false) { 8024 if (S.isConstantEvaluated()) 8025 return SLCT_NotALiteral; 8026 tryAgain: 8027 assert(Offset.isSigned() && "invalid offset"); 8028 8029 if (E->isTypeDependent() || E->isValueDependent()) 8030 return SLCT_NotALiteral; 8031 8032 E = E->IgnoreParenCasts(); 8033 8034 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 8035 // Technically -Wformat-nonliteral does not warn about this case. 8036 // The behavior of printf and friends in this case is implementation 8037 // dependent. Ideally if the format string cannot be null then 8038 // it should have a 'nonnull' attribute in the function prototype. 
8039 return SLCT_UncheckedLiteral; 8040 8041 switch (E->getStmtClass()) { 8042 case Stmt::BinaryConditionalOperatorClass: 8043 case Stmt::ConditionalOperatorClass: { 8044 // The expression is a literal if both sub-expressions were, and it was 8045 // completely checked only if both sub-expressions were checked. 8046 const AbstractConditionalOperator *C = 8047 cast<AbstractConditionalOperator>(E); 8048 8049 // Determine whether it is necessary to check both sub-expressions, for 8050 // example, because the condition expression is a constant that can be 8051 // evaluated at compile time. 8052 bool CheckLeft = true, CheckRight = true; 8053 8054 bool Cond; 8055 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 8056 S.isConstantEvaluated())) { 8057 if (Cond) 8058 CheckRight = false; 8059 else 8060 CheckLeft = false; 8061 } 8062 8063 // We need to maintain the offsets for the right and the left hand side 8064 // separately to check if every possible indexed expression is a valid 8065 // string literal. They might have different offsets for different string 8066 // literals in the end. 8067 StringLiteralCheckType Left; 8068 if (!CheckLeft) 8069 Left = SLCT_UncheckedLiteral; 8070 else { 8071 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 8072 HasVAListArg, format_idx, firstDataArg, 8073 Type, CallType, InFunctionCall, 8074 CheckedVarArgs, UncoveredArg, Offset, 8075 IgnoreStringsWithoutSpecifiers); 8076 if (Left == SLCT_NotALiteral || !CheckRight) { 8077 return Left; 8078 } 8079 } 8080 8081 StringLiteralCheckType Right = checkFormatStringExpr( 8082 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 8083 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8084 IgnoreStringsWithoutSpecifiers); 8085 8086 return (CheckLeft && Left < Right) ? Left : Right; 8087 } 8088 8089 case Stmt::ImplicitCastExprClass: 8090 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 8091 goto tryAgain; 8092 8093 case Stmt::OpaqueValueExprClass: 8094 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 8095 E = src; 8096 goto tryAgain; 8097 } 8098 return SLCT_NotALiteral; 8099 8100 case Stmt::PredefinedExprClass: 8101 // While __func__, etc., are technically not string literals, they 8102 // cannot contain format specifiers and thus are not a security 8103 // liability. 8104 return SLCT_UncheckedLiteral; 8105 8106 case Stmt::DeclRefExprClass: { 8107 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 8108 8109 // As an exception, do not flag errors for variables binding to 8110 // const string literals. 8111 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 8112 bool isConstant = false; 8113 QualType T = DR->getType(); 8114 8115 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 8116 isConstant = AT->getElementType().isConstant(S.Context); 8117 } else if (const PointerType *PT = T->getAs<PointerType>()) { 8118 isConstant = T.isConstant(S.Context) && 8119 PT->getPointeeType().isConstant(S.Context); 8120 } else if (T->isObjCObjectPointerType()) { 8121 // In ObjC, there is usually no "const ObjectPointer" type, 8122 // so don't check if the pointee type is constant. 
8123 isConstant = T.isConstant(S.Context); 8124 } 8125 8126 if (isConstant) { 8127 if (const Expr *Init = VD->getAnyInitializer()) { 8128 // Look through initializers like const char c[] = { "foo" } 8129 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 8130 if (InitList->isStringLiteralInit()) 8131 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 8132 } 8133 return checkFormatStringExpr(S, Init, Args, 8134 HasVAListArg, format_idx, 8135 firstDataArg, Type, CallType, 8136 /*InFunctionCall*/ false, CheckedVarArgs, 8137 UncoveredArg, Offset); 8138 } 8139 } 8140 8141 // For vprintf* functions (i.e., HasVAListArg==true), we add a 8142 // special check to see if the format string is a function parameter 8143 // of the function calling the printf function. If the function 8144 // has an attribute indicating it is a printf-like function, then we 8145 // should suppress warnings concerning non-literals being used in a call 8146 // to a vprintf function. For example: 8147 // 8148 // void 8149 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 8150 // va_list ap; 8151 // va_start(ap, fmt); 8152 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 8153 // ... 8154 // } 8155 if (HasVAListArg) { 8156 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 8157 if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) { 8158 int PVIndex = PV->getFunctionScopeIndex() + 1; 8159 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) { 8160 // adjust for implicit parameter 8161 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) 8162 if (MD->isInstance()) 8163 ++PVIndex; 8164 // We also check if the formats are compatible. 8165 // We can't pass a 'scanf' string to a 'printf' function. 8166 if (PVIndex == PVFormat->getFormatIdx() && 8167 Type == S.GetFormatStringType(PVFormat)) 8168 return SLCT_UncheckedLiteral; 8169 } 8170 } 8171 } 8172 } 8173 } 8174 8175 return SLCT_NotALiteral; 8176 } 8177 8178 case Stmt::CallExprClass: 8179 case Stmt::CXXMemberCallExprClass: { 8180 const CallExpr *CE = cast<CallExpr>(E); 8181 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 8182 bool IsFirst = true; 8183 StringLiteralCheckType CommonResult; 8184 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 8185 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 8186 StringLiteralCheckType Result = checkFormatStringExpr( 8187 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 8188 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 8189 IgnoreStringsWithoutSpecifiers); 8190 if (IsFirst) { 8191 CommonResult = Result; 8192 IsFirst = false; 8193 } 8194 } 8195 if (!IsFirst) 8196 return CommonResult; 8197 8198 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 8199 unsigned BuiltinID = FD->getBuiltinID(); 8200 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 8201 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 8202 const Expr *Arg = CE->getArg(0); 8203 return checkFormatStringExpr(S, Arg, Args, 8204 HasVAListArg, format_idx, 8205 firstDataArg, Type, CallType, 8206 InFunctionCall, CheckedVarArgs, 8207 UncoveredArg, Offset, 8208 IgnoreStringsWithoutSpecifiers); 8209 } 8210 } 8211 } 8212 8213 return SLCT_NotALiteral; 8214 } 8215 case Stmt::ObjCMessageExprClass: { 8216 const auto *ME = cast<ObjCMessageExpr>(E); 8217 if (const auto *MD = ME->getMethodDecl()) { 8218 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 8219 // As a special case heuristic, if we're using 
the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that lack
        // format specifiers. The idea is that if the key doesn't have any
        // format specifiers then it's probably just a key to map to the
        // localized strings. If it does have format specifiers though, then it's
        // likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
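    // Editorial illustration: for printf("%d: %s" + 4, Name) the constant 4
    // is accumulated into Offset via sumOffsets() below, and the remaining
    // "%s" is what gets checked against 'Name'.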
8273 if (BinOp->isAdditiveOp()) { 8274 Expr::EvalResult LResult, RResult; 8275 8276 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 8277 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8278 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 8279 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 8280 8281 if (LIsInt != RIsInt) { 8282 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 8283 8284 if (LIsInt) { 8285 if (BinOpKind == BO_Add) { 8286 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8287 E = BinOp->getRHS(); 8288 goto tryAgain; 8289 } 8290 } else { 8291 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8292 E = BinOp->getLHS(); 8293 goto tryAgain; 8294 } 8295 } 8296 } 8297 8298 return SLCT_NotALiteral; 8299 } 8300 case Stmt::UnaryOperatorClass: { 8301 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8302 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8303 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8304 Expr::EvalResult IndexResult; 8305 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8306 Expr::SE_NoSideEffects, 8307 S.isConstantEvaluated())) { 8308 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8309 /*RHS is int*/ true); 8310 E = ASE->getBase(); 8311 goto tryAgain; 8312 } 8313 } 8314 8315 return SLCT_NotALiteral; 8316 } 8317 8318 default: 8319 return SLCT_NotALiteral; 8320 } 8321 } 8322 8323 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8324 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8325 .Case("scanf", FST_Scanf) 8326 .Cases("printf", "printf0", FST_Printf) 8327 .Cases("NSString", "CFString", FST_NSString) 8328 .Case("strftime", FST_Strftime) 8329 .Case("strfmon", FST_Strfmon) 8330 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8331 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8332 .Case("os_trace", FST_OSLog) 8333 .Case("os_log", FST_OSLog) 8334 .Default(FST_Unknown); 8335 } 8336 8337 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8338 /// functions) for correct use of format strings. 8339 /// Returns true if a format string has been fully checked. 8340 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8341 ArrayRef<const Expr *> Args, 8342 bool IsCXXMember, 8343 VariadicCallType CallType, 8344 SourceLocation Loc, SourceRange Range, 8345 llvm::SmallBitVector &CheckedVarArgs) { 8346 FormatStringInfo FSI; 8347 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8348 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8349 FSI.FirstDataArg, GetFormatStringType(Format), 8350 CallType, Loc, Range, CheckedVarArgs); 8351 return false; 8352 } 8353 8354 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8355 bool HasVAListArg, unsigned format_idx, 8356 unsigned firstDataArg, FormatStringType Type, 8357 VariadicCallType CallType, 8358 SourceLocation Loc, SourceRange Range, 8359 llvm::SmallBitVector &CheckedVarArgs) { 8360 // CHECK: printf/scanf-like function is called with no format string. 8361 if (format_idx >= Args.size()) { 8362 Diag(Loc, diag::warn_missing_format_string) << Range; 8363 return false; 8364 } 8365 8366 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8367 8368 // CHECK: format string is not a string literal. 8369 // 8370 // Dynamically generated format strings are difficult to 8371 // automatically vet at compile time. 
Requiring that format strings 8372 // are string literals: (1) permits the checking of format strings by 8373 // the compiler and thereby (2) can practically remove the source of 8374 // many format string exploits. 8375 8376 // Format string can be either ObjC string (e.g. @"%d") or 8377 // C string (e.g. "%d") 8378 // ObjC string uses the same format specifiers as C string, so we can use 8379 // the same format string checking logic for both ObjC and C strings. 8380 UncoveredArgHandler UncoveredArg; 8381 StringLiteralCheckType CT = 8382 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8383 format_idx, firstDataArg, Type, CallType, 8384 /*IsFunctionCall*/ true, CheckedVarArgs, 8385 UncoveredArg, 8386 /*no string offset*/ llvm::APSInt(64, false) = 0); 8387 8388 // Generate a diagnostic where an uncovered argument is detected. 8389 if (UncoveredArg.hasUncoveredArg()) { 8390 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8391 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8392 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8393 } 8394 8395 if (CT != SLCT_NotALiteral) 8396 // Literal format string found, check done! 8397 return CT == SLCT_CheckedLiteral; 8398 8399 // Strftime is particular as it always uses a single 'time' argument, 8400 // so it is safe to pass a non-literal string. 8401 if (Type == FST_Strftime) 8402 return false; 8403 8404 // Do not emit diag when the string param is a macro expansion and the 8405 // format is either NSString or CFString. This is a hack to prevent 8406 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8407 // which are usually used in place of NS and CF string literals. 8408 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8409 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8410 return false; 8411 8412 // If there are no arguments specified, warn with -Wformat-security, otherwise 8413 // warn only with -Wformat-nonliteral. 8414 if (Args.size() == firstDataArg) { 8415 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8416 << OrigFormatExpr->getSourceRange(); 8417 switch (Type) { 8418 default: 8419 break; 8420 case FST_Kprintf: 8421 case FST_FreeBSDKPrintf: 8422 case FST_Printf: 8423 Diag(FormatLoc, diag::note_format_security_fixit) 8424 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8425 break; 8426 case FST_NSString: 8427 Diag(FormatLoc, diag::note_format_security_fixit) 8428 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8429 break; 8430 } 8431 } else { 8432 Diag(FormatLoc, diag::warn_format_nonliteral) 8433 << OrigFormatExpr->getSourceRange(); 8434 } 8435 return false; 8436 } 8437 8438 namespace { 8439 8440 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8441 protected: 8442 Sema &S; 8443 const FormatStringLiteral *FExpr; 8444 const Expr *OrigFormatExpr; 8445 const Sema::FormatStringType FSType; 8446 const unsigned FirstDataArg; 8447 const unsigned NumDataArgs; 8448 const char *Beg; // Start of format string. 
8449 const bool HasVAListArg; 8450 ArrayRef<const Expr *> Args; 8451 unsigned FormatIdx; 8452 llvm::SmallBitVector CoveredArgs; 8453 bool usesPositionalArgs = false; 8454 bool atFirstArg = true; 8455 bool inFunctionCall; 8456 Sema::VariadicCallType CallType; 8457 llvm::SmallBitVector &CheckedVarArgs; 8458 UncoveredArgHandler &UncoveredArg; 8459 8460 public: 8461 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8462 const Expr *origFormatExpr, 8463 const Sema::FormatStringType type, unsigned firstDataArg, 8464 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8465 ArrayRef<const Expr *> Args, unsigned formatIdx, 8466 bool inFunctionCall, Sema::VariadicCallType callType, 8467 llvm::SmallBitVector &CheckedVarArgs, 8468 UncoveredArgHandler &UncoveredArg) 8469 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8470 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8471 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8472 inFunctionCall(inFunctionCall), CallType(callType), 8473 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8474 CoveredArgs.resize(numDataArgs); 8475 CoveredArgs.reset(); 8476 } 8477 8478 void DoneProcessing(); 8479 8480 void HandleIncompleteSpecifier(const char *startSpecifier, 8481 unsigned specifierLen) override; 8482 8483 void HandleInvalidLengthModifier( 8484 const analyze_format_string::FormatSpecifier &FS, 8485 const analyze_format_string::ConversionSpecifier &CS, 8486 const char *startSpecifier, unsigned specifierLen, 8487 unsigned DiagID); 8488 8489 void HandleNonStandardLengthModifier( 8490 const analyze_format_string::FormatSpecifier &FS, 8491 const char *startSpecifier, unsigned specifierLen); 8492 8493 void HandleNonStandardConversionSpecifier( 8494 const analyze_format_string::ConversionSpecifier &CS, 8495 const char *startSpecifier, unsigned specifierLen); 8496 8497 void HandlePosition(const char *startPos, unsigned posLen) override; 8498 8499 void HandleInvalidPosition(const char *startSpecifier, 8500 unsigned specifierLen, 8501 analyze_format_string::PositionContext p) override; 8502 8503 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8504 8505 void HandleNullChar(const char *nullCharacter) override; 8506 8507 template <typename Range> 8508 static void 8509 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8510 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8511 bool IsStringLocation, Range StringRange, 8512 ArrayRef<FixItHint> Fixit = None); 8513 8514 protected: 8515 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8516 const char *startSpec, 8517 unsigned specifierLen, 8518 const char *csStart, unsigned csLen); 8519 8520 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8521 const char *startSpec, 8522 unsigned specifierLen); 8523 8524 SourceRange getFormatStringRange(); 8525 CharSourceRange getSpecifierRange(const char *startSpecifier, 8526 unsigned specifierLen); 8527 SourceLocation getLocationOfByte(const char *x); 8528 8529 const Expr *getDataArg(unsigned i) const; 8530 8531 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8532 const analyze_format_string::ConversionSpecifier &CS, 8533 const char *startSpecifier, unsigned specifierLen, 8534 unsigned argIndex); 8535 8536 template <typename Range> 8537 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8538 bool IsStringLocation, Range StringRange, 8539 ArrayRef<FixItHint> Fixit = None); 
8540 }; 8541 8542 } // namespace 8543 8544 SourceRange CheckFormatHandler::getFormatStringRange() { 8545 return OrigFormatExpr->getSourceRange(); 8546 } 8547 8548 CharSourceRange CheckFormatHandler:: 8549 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8550 SourceLocation Start = getLocationOfByte(startSpecifier); 8551 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8552 8553 // Advance the end SourceLocation by one due to half-open ranges. 8554 End = End.getLocWithOffset(1); 8555 8556 return CharSourceRange::getCharRange(Start, End); 8557 } 8558 8559 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8560 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8561 S.getLangOpts(), S.Context.getTargetInfo()); 8562 } 8563 8564 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8565 unsigned specifierLen){ 8566 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8567 getLocationOfByte(startSpecifier), 8568 /*IsStringLocation*/true, 8569 getSpecifierRange(startSpecifier, specifierLen)); 8570 } 8571 8572 void CheckFormatHandler::HandleInvalidLengthModifier( 8573 const analyze_format_string::FormatSpecifier &FS, 8574 const analyze_format_string::ConversionSpecifier &CS, 8575 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8576 using namespace analyze_format_string; 8577 8578 const LengthModifier &LM = FS.getLengthModifier(); 8579 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8580 8581 // See if we know how to fix this length modifier. 8582 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8583 if (FixedLM) { 8584 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8585 getLocationOfByte(LM.getStart()), 8586 /*IsStringLocation*/true, 8587 getSpecifierRange(startSpecifier, specifierLen)); 8588 8589 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8590 << FixedLM->toString() 8591 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8592 8593 } else { 8594 FixItHint Hint; 8595 if (DiagID == diag::warn_format_nonsensical_length) 8596 Hint = FixItHint::CreateRemoval(LMRange); 8597 8598 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8599 getLocationOfByte(LM.getStart()), 8600 /*IsStringLocation*/true, 8601 getSpecifierRange(startSpecifier, specifierLen), 8602 Hint); 8603 } 8604 } 8605 8606 void CheckFormatHandler::HandleNonStandardLengthModifier( 8607 const analyze_format_string::FormatSpecifier &FS, 8608 const char *startSpecifier, unsigned specifierLen) { 8609 using namespace analyze_format_string; 8610 8611 const LengthModifier &LM = FS.getLengthModifier(); 8612 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8613 8614 // See if we know how to fix this length modifier. 
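  // Editorial illustration: the BSD 'q' length modifier in "%qd" is
  // non-standard; when a standard replacement is known, the note emitted
  // below suggests "%lld" instead.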
8615 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8616 if (FixedLM) { 8617 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8618 << LM.toString() << 0, 8619 getLocationOfByte(LM.getStart()), 8620 /*IsStringLocation*/true, 8621 getSpecifierRange(startSpecifier, specifierLen)); 8622 8623 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8624 << FixedLM->toString() 8625 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8626 8627 } else { 8628 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8629 << LM.toString() << 0, 8630 getLocationOfByte(LM.getStart()), 8631 /*IsStringLocation*/true, 8632 getSpecifierRange(startSpecifier, specifierLen)); 8633 } 8634 } 8635 8636 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 8637 const analyze_format_string::ConversionSpecifier &CS, 8638 const char *startSpecifier, unsigned specifierLen) { 8639 using namespace analyze_format_string; 8640 8641 // See if we know how to fix this conversion specifier. 8642 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 8643 if (FixedCS) { 8644 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8645 << CS.toString() << /*conversion specifier*/1, 8646 getLocationOfByte(CS.getStart()), 8647 /*IsStringLocation*/true, 8648 getSpecifierRange(startSpecifier, specifierLen)); 8649 8650 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 8651 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 8652 << FixedCS->toString() 8653 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 8654 } else { 8655 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8656 << CS.toString() << /*conversion specifier*/1, 8657 getLocationOfByte(CS.getStart()), 8658 /*IsStringLocation*/true, 8659 getSpecifierRange(startSpecifier, specifierLen)); 8660 } 8661 } 8662 8663 void CheckFormatHandler::HandlePosition(const char *startPos, 8664 unsigned posLen) { 8665 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 8666 getLocationOfByte(startPos), 8667 /*IsStringLocation*/true, 8668 getSpecifierRange(startPos, posLen)); 8669 } 8670 8671 void 8672 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 8673 analyze_format_string::PositionContext p) { 8674 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 8675 << (unsigned) p, 8676 getLocationOfByte(startPos), /*IsStringLocation*/true, 8677 getSpecifierRange(startPos, posLen)); 8678 } 8679 8680 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 8681 unsigned posLen) { 8682 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 8683 getLocationOfByte(startPos), 8684 /*IsStringLocation*/true, 8685 getSpecifierRange(startPos, posLen)); 8686 } 8687 8688 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 8689 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 8690 // The presence of a null character is likely an error. 8691 EmitFormatDiagnostic( 8692 S.PDiag(diag::warn_printf_format_string_contains_null_char), 8693 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 8694 getFormatStringRange()); 8695 } 8696 } 8697 8698 // Note that this may return NULL if there was an error parsing or building 8699 // one of the argument expressions. 
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
      S, IsFunctionCall, DiagnosticExprs[0],
      PDiag, Loc, /*IsStringLocation*/false,
      DiagnosticExprs[0]->getSourceRange());
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  } else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
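  // Editorial illustration: an invalid specifier that starts with the bytes
  // of U+20AC is reported as "\u20ac", while a lone non-UTF-8 byte falls back
  // to the "\xNN" spelling produced below.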
8770 std::string CodePointStr; 8771 if (!llvm::sys::locale::isPrint(*csStart)) { 8772 llvm::UTF32 CodePoint; 8773 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8774 const llvm::UTF8 *E = 8775 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8776 llvm::ConversionResult Result = 8777 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8778 8779 if (Result != llvm::conversionOK) { 8780 unsigned char FirstChar = *csStart; 8781 CodePoint = (llvm::UTF32)FirstChar; 8782 } 8783 8784 llvm::raw_string_ostream OS(CodePointStr); 8785 if (CodePoint < 256) 8786 OS << "\\x" << llvm::format("%02x", CodePoint); 8787 else if (CodePoint <= 0xFFFF) 8788 OS << "\\u" << llvm::format("%04x", CodePoint); 8789 else 8790 OS << "\\U" << llvm::format("%08x", CodePoint); 8791 OS.flush(); 8792 Specifier = CodePointStr; 8793 } 8794 8795 EmitFormatDiagnostic( 8796 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8797 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8798 8799 return keepGoing; 8800 } 8801 8802 void 8803 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8804 const char *startSpec, 8805 unsigned specifierLen) { 8806 EmitFormatDiagnostic( 8807 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8808 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8809 } 8810 8811 bool 8812 CheckFormatHandler::CheckNumArgs( 8813 const analyze_format_string::FormatSpecifier &FS, 8814 const analyze_format_string::ConversionSpecifier &CS, 8815 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8816 8817 if (argIndex >= NumDataArgs) { 8818 PartialDiagnostic PDiag = FS.usesPositionalArg() 8819 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8820 << (argIndex+1) << NumDataArgs) 8821 : S.PDiag(diag::warn_printf_insufficient_data_args); 8822 EmitFormatDiagnostic( 8823 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8824 getSpecifierRange(startSpecifier, specifierLen)); 8825 8826 // Since more arguments than conversion tokens are given, by extension 8827 // all arguments are covered, so mark this as so. 8828 UncoveredArg.setAllCovered(); 8829 return false; 8830 } 8831 return true; 8832 } 8833 8834 template<typename Range> 8835 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8836 SourceLocation Loc, 8837 bool IsStringLocation, 8838 Range StringRange, 8839 ArrayRef<FixItHint> FixIt) { 8840 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8841 Loc, IsStringLocation, StringRange, FixIt); 8842 } 8843 8844 /// If the format string is not within the function call, emit a note 8845 /// so that the function call and string are in diagnostic messages. 8846 /// 8847 /// \param InFunctionCall if true, the format string is within the function 8848 /// call and only one diagnostic message will be produced. Otherwise, an 8849 /// extra note will be emitted pointing to location of the format string. 8850 /// 8851 /// \param ArgumentExpr the expression that is passed as the format string 8852 /// argument in the function call. Used for getting locations when two 8853 /// diagnostics are emitted. 8854 /// 8855 /// \param PDiag the callee should already have provided any strings for the 8856 /// diagnostic message. This function only adds locations and fixits 8857 /// to diagnostics. 8858 /// 8859 /// \param Loc primary location for diagnostic. 
If two diagnostics are 8860 /// required, one will be at Loc and a new SourceLocation will be created for 8861 /// the other one. 8862 /// 8863 /// \param IsStringLocation if true, Loc points to the format string should be 8864 /// used for the note. Otherwise, Loc points to the argument list and will 8865 /// be used with PDiag. 8866 /// 8867 /// \param StringRange some or all of the string to highlight. This is 8868 /// templated so it can accept either a CharSourceRange or a SourceRange. 8869 /// 8870 /// \param FixIt optional fix it hint for the format string. 8871 template <typename Range> 8872 void CheckFormatHandler::EmitFormatDiagnostic( 8873 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 8874 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 8875 Range StringRange, ArrayRef<FixItHint> FixIt) { 8876 if (InFunctionCall) { 8877 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 8878 D << StringRange; 8879 D << FixIt; 8880 } else { 8881 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 8882 << ArgumentExpr->getSourceRange(); 8883 8884 const Sema::SemaDiagnosticBuilder &Note = 8885 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 8886 diag::note_format_string_defined); 8887 8888 Note << StringRange; 8889 Note << FixIt; 8890 } 8891 } 8892 8893 //===--- CHECK: Printf format string checking ------------------------------===// 8894 8895 namespace { 8896 8897 class CheckPrintfHandler : public CheckFormatHandler { 8898 public: 8899 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 8900 const Expr *origFormatExpr, 8901 const Sema::FormatStringType type, unsigned firstDataArg, 8902 unsigned numDataArgs, bool isObjC, const char *beg, 8903 bool hasVAListArg, ArrayRef<const Expr *> Args, 8904 unsigned formatIdx, bool inFunctionCall, 8905 Sema::VariadicCallType CallType, 8906 llvm::SmallBitVector &CheckedVarArgs, 8907 UncoveredArgHandler &UncoveredArg) 8908 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8909 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8910 inFunctionCall, CallType, CheckedVarArgs, 8911 UncoveredArg) {} 8912 8913 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 8914 8915 /// Returns true if '%@' specifiers are allowed in the format string. 
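  /// For example (editorial note): '%@' is accepted for NSString-style and
  /// os_log/os_trace format strings, e.g. NSLog(@"value: %@", obj), but not
  /// for plain printf format strings.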
8916 bool allowsObjCArg() const { 8917 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8918 FSType == Sema::FST_OSTrace; 8919 } 8920 8921 bool HandleInvalidPrintfConversionSpecifier( 8922 const analyze_printf::PrintfSpecifier &FS, 8923 const char *startSpecifier, 8924 unsigned specifierLen) override; 8925 8926 void handleInvalidMaskType(StringRef MaskType) override; 8927 8928 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8929 const char *startSpecifier, unsigned specifierLen, 8930 const TargetInfo &Target) override; 8931 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8932 const char *StartSpecifier, 8933 unsigned SpecifierLen, 8934 const Expr *E); 8935 8936 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8937 const char *startSpecifier, unsigned specifierLen); 8938 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8939 const analyze_printf::OptionalAmount &Amt, 8940 unsigned type, 8941 const char *startSpecifier, unsigned specifierLen); 8942 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8943 const analyze_printf::OptionalFlag &flag, 8944 const char *startSpecifier, unsigned specifierLen); 8945 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8946 const analyze_printf::OptionalFlag &ignoredFlag, 8947 const analyze_printf::OptionalFlag &flag, 8948 const char *startSpecifier, unsigned specifierLen); 8949 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8950 const Expr *E); 8951 8952 void HandleEmptyObjCModifierFlag(const char *startFlag, 8953 unsigned flagLen) override; 8954 8955 void HandleInvalidObjCModifierFlag(const char *startFlag, 8956 unsigned flagLen) override; 8957 8958 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8959 const char *flagsEnd, 8960 const char *conversionPosition) 8961 override; 8962 }; 8963 8964 } // namespace 8965 8966 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8967 const analyze_printf::PrintfSpecifier &FS, 8968 const char *startSpecifier, 8969 unsigned specifierLen) { 8970 const analyze_printf::PrintfConversionSpecifier &CS = 8971 FS.getConversionSpecifier(); 8972 8973 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8974 getLocationOfByte(CS.getStart()), 8975 startSpecifier, specifierLen, 8976 CS.getStart(), CS.getLength()); 8977 } 8978 8979 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8980 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8981 } 8982 8983 bool CheckPrintfHandler::HandleAmount( 8984 const analyze_format_string::OptionalAmount &Amt, 8985 unsigned k, const char *startSpecifier, 8986 unsigned specifierLen) { 8987 if (Amt.hasDataArgument()) { 8988 if (!HasVAListArg) { 8989 unsigned argIndex = Amt.getArgIndex(); 8990 if (argIndex >= NumDataArgs) { 8991 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8992 << k, 8993 getLocationOfByte(Amt.getStart()), 8994 /*IsStringLocation*/true, 8995 getSpecifierRange(startSpecifier, specifierLen)); 8996 // Don't do any more checking. We will just emit 8997 // spurious errors. 8998 return false; 8999 } 9000 9001 // Type check the data argument. It should be an 'int'. 9002 // Although not in conformance with C99, we also allow the argument to be 9003 // an 'unsigned int' as that is a reasonably safe case. GCC also 9004 // doesn't emit a warning for that case. 
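      // Illustrative example (editor's addition, not part of the original
      // code): for a call like printf("%*d", width, value), 'width' is the
      // data argument checked here and must be an 'int' (or, per the comment
      // above, an 'unsigned int').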
9005 CoveredArgs.set(argIndex); 9006 const Expr *Arg = getDataArg(argIndex); 9007 if (!Arg) 9008 return false; 9009 9010 QualType T = Arg->getType(); 9011 9012 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 9013 assert(AT.isValid()); 9014 9015 if (!AT.matchesType(S.Context, T)) { 9016 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 9017 << k << AT.getRepresentativeTypeName(S.Context) 9018 << T << Arg->getSourceRange(), 9019 getLocationOfByte(Amt.getStart()), 9020 /*IsStringLocation*/true, 9021 getSpecifierRange(startSpecifier, specifierLen)); 9022 // Don't do any more checking. We will just emit 9023 // spurious errors. 9024 return false; 9025 } 9026 } 9027 } 9028 return true; 9029 } 9030 9031 void CheckPrintfHandler::HandleInvalidAmount( 9032 const analyze_printf::PrintfSpecifier &FS, 9033 const analyze_printf::OptionalAmount &Amt, 9034 unsigned type, 9035 const char *startSpecifier, 9036 unsigned specifierLen) { 9037 const analyze_printf::PrintfConversionSpecifier &CS = 9038 FS.getConversionSpecifier(); 9039 9040 FixItHint fixit = 9041 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 9042 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 9043 Amt.getConstantLength())) 9044 : FixItHint(); 9045 9046 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 9047 << type << CS.toString(), 9048 getLocationOfByte(Amt.getStart()), 9049 /*IsStringLocation*/true, 9050 getSpecifierRange(startSpecifier, specifierLen), 9051 fixit); 9052 } 9053 9054 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9055 const analyze_printf::OptionalFlag &flag, 9056 const char *startSpecifier, 9057 unsigned specifierLen) { 9058 // Warn about pointless flag with a fixit removal. 9059 const analyze_printf::PrintfConversionSpecifier &CS = 9060 FS.getConversionSpecifier(); 9061 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 9062 << flag.toString() << CS.toString(), 9063 getLocationOfByte(flag.getPosition()), 9064 /*IsStringLocation*/true, 9065 getSpecifierRange(startSpecifier, specifierLen), 9066 FixItHint::CreateRemoval( 9067 getSpecifierRange(flag.getPosition(), 1))); 9068 } 9069 9070 void CheckPrintfHandler::HandleIgnoredFlag( 9071 const analyze_printf::PrintfSpecifier &FS, 9072 const analyze_printf::OptionalFlag &ignoredFlag, 9073 const analyze_printf::OptionalFlag &flag, 9074 const char *startSpecifier, 9075 unsigned specifierLen) { 9076 // Warn about ignored flag with a fixit removal. 9077 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 9078 << ignoredFlag.toString() << flag.toString(), 9079 getLocationOfByte(ignoredFlag.getPosition()), 9080 /*IsStringLocation*/true, 9081 getSpecifierRange(startSpecifier, specifierLen), 9082 FixItHint::CreateRemoval( 9083 getSpecifierRange(ignoredFlag.getPosition(), 1))); 9084 } 9085 9086 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 9087 unsigned flagLen) { 9088 // Warn about an empty flag. 9089 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 9090 getLocationOfByte(startFlag), 9091 /*IsStringLocation*/true, 9092 getSpecifierRange(startFlag, flagLen)); 9093 } 9094 9095 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 9096 unsigned flagLen) { 9097 // Warn about an invalid flag. 
9098 auto Range = getSpecifierRange(startFlag, flagLen); 9099 StringRef flag(startFlag, flagLen); 9100 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 9101 getLocationOfByte(startFlag), 9102 /*IsStringLocation*/true, 9103 Range, FixItHint::CreateRemoval(Range)); 9104 } 9105 9106 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 9107 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 9108 // Warn about using '[...]' without a '@' conversion. 9109 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 9110 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 9111 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 9112 getLocationOfByte(conversionPosition), 9113 /*IsStringLocation*/true, 9114 Range, FixItHint::CreateRemoval(Range)); 9115 } 9116 9117 // Determines if the specified is a C++ class or struct containing 9118 // a member with the specified name and kind (e.g. a CXXMethodDecl named 9119 // "c_str()"). 9120 template<typename MemberKind> 9121 static llvm::SmallPtrSet<MemberKind*, 1> 9122 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 9123 const RecordType *RT = Ty->getAs<RecordType>(); 9124 llvm::SmallPtrSet<MemberKind*, 1> Results; 9125 9126 if (!RT) 9127 return Results; 9128 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 9129 if (!RD || !RD->getDefinition()) 9130 return Results; 9131 9132 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 9133 Sema::LookupMemberName); 9134 R.suppressDiagnostics(); 9135 9136 // We just need to include all members of the right kind turned up by the 9137 // filter, at this point. 9138 if (S.LookupQualifiedName(R, RT->getDecl())) 9139 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 9140 NamedDecl *decl = (*I)->getUnderlyingDecl(); 9141 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 9142 Results.insert(FK); 9143 } 9144 return Results; 9145 } 9146 9147 /// Check if we could call '.c_str()' on an object. 9148 /// 9149 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 9150 /// allow the call, or if it would be ambiguous). 9151 bool Sema::hasCStrMethod(const Expr *E) { 9152 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9153 9154 MethodSet Results = 9155 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 9156 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9157 MI != ME; ++MI) 9158 if ((*MI)->getMinRequiredArguments() == 0) 9159 return true; 9160 return false; 9161 } 9162 9163 // Check if a (w)string was passed when a (w)char* was needed, and offer a 9164 // better diagnostic if so. AT is assumed to be valid. 9165 // Returns true when a c_str() conversion method is found. 9166 bool CheckPrintfHandler::checkForCStrMembers( 9167 const analyze_printf::ArgType &AT, const Expr *E) { 9168 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 9169 9170 MethodSet Results = 9171 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 9172 9173 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 9174 MI != ME; ++MI) { 9175 const CXXMethodDecl *Method = *MI; 9176 if (Method->getMinRequiredArguments() == 0 && 9177 AT.matchesType(S.Context, Method->getReturnType())) { 9178 // FIXME: Suggest parens if the expression needs them. 
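      // Illustrative example (editor's addition): for printf("%s", Str) where
      // 'Str' is a class with a zero-argument c_str() whose return type
      // matches the expected pointer type (e.g. std::string), the note below
      // suggests inserting ".c_str()" after the argument.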
9179 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 9180 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 9181 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 9182 return true; 9183 } 9184 } 9185 9186 return false; 9187 } 9188 9189 bool CheckPrintfHandler::HandlePrintfSpecifier( 9190 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, 9191 unsigned specifierLen, const TargetInfo &Target) { 9192 using namespace analyze_format_string; 9193 using namespace analyze_printf; 9194 9195 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 9196 9197 if (FS.consumesDataArgument()) { 9198 if (atFirstArg) { 9199 atFirstArg = false; 9200 usesPositionalArgs = FS.usesPositionalArg(); 9201 } 9202 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9203 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9204 startSpecifier, specifierLen); 9205 return false; 9206 } 9207 } 9208 9209 // First check if the field width, precision, and conversion specifier 9210 // have matching data arguments. 9211 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 9212 startSpecifier, specifierLen)) { 9213 return false; 9214 } 9215 9216 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 9217 startSpecifier, specifierLen)) { 9218 return false; 9219 } 9220 9221 if (!CS.consumesDataArgument()) { 9222 // FIXME: Technically specifying a precision or field width here 9223 // makes no sense. Worth issuing a warning at some point. 9224 return true; 9225 } 9226 9227 // Consume the argument. 9228 unsigned argIndex = FS.getArgIndex(); 9229 if (argIndex < NumDataArgs) { 9230 // The check to see if the argIndex is valid will come later. 9231 // We set the bit here because we may exit early from this 9232 // function if we encounter some other error. 9233 CoveredArgs.set(argIndex); 9234 } 9235 9236 // FreeBSD kernel extensions. 9237 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 9238 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 9239 // We need at least two arguments. 9240 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 9241 return false; 9242 9243 // Claim the second argument. 9244 CoveredArgs.set(argIndex + 1); 9245 9246 // Type check the first argument (int for %b, pointer for %D) 9247 const Expr *Ex = getDataArg(argIndex); 9248 const analyze_printf::ArgType &AT = 9249 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 9250 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 9251 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 9252 EmitFormatDiagnostic( 9253 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9254 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 9255 << false << Ex->getSourceRange(), 9256 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9257 getSpecifierRange(startSpecifier, specifierLen)); 9258 9259 // Type check the second argument (char * for both %b and %D) 9260 Ex = getDataArg(argIndex + 1); 9261 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 9262 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 9263 EmitFormatDiagnostic( 9264 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9265 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 9266 << false << Ex->getSourceRange(), 9267 Ex->getBeginLoc(), /*IsStringLocation*/ false, 9268 getSpecifierRange(startSpecifier, specifierLen)); 9269 9270 return true; 9271 } 9272 9273 // Check for using an Objective-C specific conversion specifier 9274 // in a non-ObjC literal. 
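  // Illustrative example (editor's addition): '%@' appearing in a plain
  // printf format string is rejected by the check below, since only NSString,
  // os_log, and os_trace formats accept Objective-C object conversions
  // (see allowsObjCArg()).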
9275 if (!allowsObjCArg() && CS.isObjCArg()) { 9276 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9277 specifierLen); 9278 } 9279 9280 // %P can only be used with os_log. 9281 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 9282 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9283 specifierLen); 9284 } 9285 9286 // %n is not allowed with os_log. 9287 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9288 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9289 getLocationOfByte(CS.getStart()), 9290 /*IsStringLocation*/ false, 9291 getSpecifierRange(startSpecifier, specifierLen)); 9292 9293 return true; 9294 } 9295 9296 // Only scalars are allowed for os_trace. 9297 if (FSType == Sema::FST_OSTrace && 9298 (CS.getKind() == ConversionSpecifier::PArg || 9299 CS.getKind() == ConversionSpecifier::sArg || 9300 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9301 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9302 specifierLen); 9303 } 9304 9305 // Check for use of public/private annotation outside of os_log(). 9306 if (FSType != Sema::FST_OSLog) { 9307 if (FS.isPublic().isSet()) { 9308 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9309 << "public", 9310 getLocationOfByte(FS.isPublic().getPosition()), 9311 /*IsStringLocation*/ false, 9312 getSpecifierRange(startSpecifier, specifierLen)); 9313 } 9314 if (FS.isPrivate().isSet()) { 9315 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9316 << "private", 9317 getLocationOfByte(FS.isPrivate().getPosition()), 9318 /*IsStringLocation*/ false, 9319 getSpecifierRange(startSpecifier, specifierLen)); 9320 } 9321 } 9322 9323 const llvm::Triple &Triple = Target.getTriple(); 9324 if (CS.getKind() == ConversionSpecifier::nArg && 9325 (Triple.isAndroid() || Triple.isOSFuchsia())) { 9326 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported), 9327 getLocationOfByte(CS.getStart()), 9328 /*IsStringLocation*/ false, 9329 getSpecifierRange(startSpecifier, specifierLen)); 9330 } 9331 9332 // Check for invalid use of field width 9333 if (!FS.hasValidFieldWidth()) { 9334 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9335 startSpecifier, specifierLen); 9336 } 9337 9338 // Check for invalid use of precision 9339 if (!FS.hasValidPrecision()) { 9340 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9341 startSpecifier, specifierLen); 9342 } 9343 9344 // Precision is mandatory for %P specifier. 9345 if (CS.getKind() == ConversionSpecifier::PArg && 9346 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9347 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9348 getLocationOfByte(startSpecifier), 9349 /*IsStringLocation*/ false, 9350 getSpecifierRange(startSpecifier, specifierLen)); 9351 } 9352 9353 // Check each flag does not conflict with any other component. 
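  // Illustrative example (editor's addition): a flag that is meaningless for
  // the chosen conversion, e.g. the '+' in printf("%+s", s), is diagnosed by
  // the checks below with a fix-it that removes the flag.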
9354 if (!FS.hasValidThousandsGroupingPrefix()) 9355 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9356 if (!FS.hasValidLeadingZeros()) 9357 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9358 if (!FS.hasValidPlusPrefix()) 9359 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9360 if (!FS.hasValidSpacePrefix()) 9361 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9362 if (!FS.hasValidAlternativeForm()) 9363 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9364 if (!FS.hasValidLeftJustified()) 9365 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9366 9367 // Check that flags are not ignored by another flag 9368 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9369 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9370 startSpecifier, specifierLen); 9371 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9372 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9373 startSpecifier, specifierLen); 9374 9375 // Check the length modifier is valid with the given conversion specifier. 9376 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9377 S.getLangOpts())) 9378 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9379 diag::warn_format_nonsensical_length); 9380 else if (!FS.hasStandardLengthModifier()) 9381 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9382 else if (!FS.hasStandardLengthConversionCombination()) 9383 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9384 diag::warn_format_non_standard_conversion_spec); 9385 9386 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9387 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9388 9389 // The remaining checks depend on the data arguments. 9390 if (HasVAListArg) 9391 return true; 9392 9393 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9394 return false; 9395 9396 const Expr *Arg = getDataArg(argIndex); 9397 if (!Arg) 9398 return true; 9399 9400 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9401 } 9402 9403 static bool requiresParensToAddCast(const Expr *E) { 9404 // FIXME: We should have a general way to reason about operator 9405 // precedence and whether parens are actually needed here. 9406 // Take care of a few common cases where they aren't. 
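  // Illustrative example (editor's addition): casting a simple DeclRefExpr
  // such as 'n' only needs "(NSInteger)n", while a binary expression such as
  // 'a + b' would need "(NSInteger)(a + b)"; the cases listed below are the
  // ones that do not require the extra parentheses.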
9407 const Expr *Inside = E->IgnoreImpCasts(); 9408 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9409 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9410 9411 switch (Inside->getStmtClass()) { 9412 case Stmt::ArraySubscriptExprClass: 9413 case Stmt::CallExprClass: 9414 case Stmt::CharacterLiteralClass: 9415 case Stmt::CXXBoolLiteralExprClass: 9416 case Stmt::DeclRefExprClass: 9417 case Stmt::FloatingLiteralClass: 9418 case Stmt::IntegerLiteralClass: 9419 case Stmt::MemberExprClass: 9420 case Stmt::ObjCArrayLiteralClass: 9421 case Stmt::ObjCBoolLiteralExprClass: 9422 case Stmt::ObjCBoxedExprClass: 9423 case Stmt::ObjCDictionaryLiteralClass: 9424 case Stmt::ObjCEncodeExprClass: 9425 case Stmt::ObjCIvarRefExprClass: 9426 case Stmt::ObjCMessageExprClass: 9427 case Stmt::ObjCPropertyRefExprClass: 9428 case Stmt::ObjCStringLiteralClass: 9429 case Stmt::ObjCSubscriptRefExprClass: 9430 case Stmt::ParenExprClass: 9431 case Stmt::StringLiteralClass: 9432 case Stmt::UnaryOperatorClass: 9433 return false; 9434 default: 9435 return true; 9436 } 9437 } 9438 9439 static std::pair<QualType, StringRef> 9440 shouldNotPrintDirectly(const ASTContext &Context, 9441 QualType IntendedTy, 9442 const Expr *E) { 9443 // Use a 'while' to peel off layers of typedefs. 9444 QualType TyTy = IntendedTy; 9445 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9446 StringRef Name = UserTy->getDecl()->getName(); 9447 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9448 .Case("CFIndex", Context.getNSIntegerType()) 9449 .Case("NSInteger", Context.getNSIntegerType()) 9450 .Case("NSUInteger", Context.getNSUIntegerType()) 9451 .Case("SInt32", Context.IntTy) 9452 .Case("UInt32", Context.UnsignedIntTy) 9453 .Default(QualType()); 9454 9455 if (!CastTy.isNull()) 9456 return std::make_pair(CastTy, Name); 9457 9458 TyTy = UserTy->desugar(); 9459 } 9460 9461 // Strip parens if necessary. 9462 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9463 return shouldNotPrintDirectly(Context, 9464 PE->getSubExpr()->getType(), 9465 PE->getSubExpr()); 9466 9467 // If this is a conditional expression, then its result type is constructed 9468 // via usual arithmetic conversions and thus there might be no necessary 9469 // typedef sugar there. Recurse to operands to check for NSInteger & 9470 // Co. usage condition. 9471 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9472 QualType TrueTy, FalseTy; 9473 StringRef TrueName, FalseName; 9474 9475 std::tie(TrueTy, TrueName) = 9476 shouldNotPrintDirectly(Context, 9477 CO->getTrueExpr()->getType(), 9478 CO->getTrueExpr()); 9479 std::tie(FalseTy, FalseName) = 9480 shouldNotPrintDirectly(Context, 9481 CO->getFalseExpr()->getType(), 9482 CO->getFalseExpr()); 9483 9484 if (TrueTy == FalseTy) 9485 return std::make_pair(TrueTy, TrueName); 9486 else if (TrueTy.isNull()) 9487 return std::make_pair(FalseTy, FalseName); 9488 else if (FalseTy.isNull()) 9489 return std::make_pair(TrueTy, TrueName); 9490 } 9491 9492 return std::make_pair(QualType(), StringRef()); 9493 } 9494 9495 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9496 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9497 /// type do not count. 9498 static bool 9499 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9500 QualType From = ICE->getSubExpr()->getType(); 9501 QualType To = ICE->getType(); 9502 // It's an integer promotion if the destination type is the promoted 9503 // source type. 
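  // Illustrative example (editor's addition): for a 'short' argument passed
  // to a variadic function, the implicit cast is CK_IntegralCast with
  // From = short and To = int, and getPromotedIntegerType(short) == int, so
  // the cast counts as an argument promotion here.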
9504 if (ICE->getCastKind() == CK_IntegralCast && 9505 From->isPromotableIntegerType() && 9506 S.Context.getPromotedIntegerType(From) == To) 9507 return true; 9508 // Look through vector types, since we do default argument promotion for 9509 // those in OpenCL. 9510 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9511 From = VecTy->getElementType(); 9512 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9513 To = VecTy->getElementType(); 9514 // It's a floating promotion if the source type is a lower rank. 9515 return ICE->getCastKind() == CK_FloatingCast && 9516 S.Context.getFloatingTypeOrder(From, To) < 0; 9517 } 9518 9519 bool 9520 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9521 const char *StartSpecifier, 9522 unsigned SpecifierLen, 9523 const Expr *E) { 9524 using namespace analyze_format_string; 9525 using namespace analyze_printf; 9526 9527 // Now type check the data expression that matches the 9528 // format specifier. 9529 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9530 if (!AT.isValid()) 9531 return true; 9532 9533 QualType ExprTy = E->getType(); 9534 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9535 ExprTy = TET->getUnderlyingExpr()->getType(); 9536 } 9537 9538 // Diagnose attempts to print a boolean value as a character. Unlike other 9539 // -Wformat diagnostics, this is fine from a type perspective, but it still 9540 // doesn't make sense. 9541 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9542 E->isKnownToHaveBooleanValue()) { 9543 const CharSourceRange &CSR = 9544 getSpecifierRange(StartSpecifier, SpecifierLen); 9545 SmallString<4> FSString; 9546 llvm::raw_svector_ostream os(FSString); 9547 FS.toString(os); 9548 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9549 << FSString, 9550 E->getExprLoc(), false, CSR); 9551 return true; 9552 } 9553 9554 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9555 if (Match == analyze_printf::ArgType::Match) 9556 return true; 9557 9558 // Look through argument promotions for our error message's reported type. 9559 // This includes the integral and floating promotions, but excludes array 9560 // and function pointer decay (seeing that an argument intended to be a 9561 // string has type 'char [6]' is probably more confusing than 'char *') and 9562 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9563 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9564 if (isArithmeticArgumentPromotion(S, ICE)) { 9565 E = ICE->getSubExpr(); 9566 ExprTy = E->getType(); 9567 9568 // Check if we didn't match because of an implicit cast from a 'char' 9569 // or 'short' to an 'int'. This is done because printf is a varargs 9570 // function. 9571 if (ICE->getType() == S.Context.IntTy || 9572 ICE->getType() == S.Context.UnsignedIntTy) { 9573 // All further checking is done on the subexpression 9574 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9575 AT.matchesType(S.Context, ExprTy); 9576 if (ImplicitMatch == analyze_printf::ArgType::Match) 9577 return true; 9578 if (ImplicitMatch == ArgType::NoMatchPedantic || 9579 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9580 Match = ImplicitMatch; 9581 } 9582 } 9583 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 9584 // Special case for 'a', which has type 'int' in C. 
9585 // Note, however, that we do /not/ want to treat multibyte constants like 9586 // 'MooV' as characters! This form is deprecated but still exists. In 9587 // addition, don't treat expressions as of type 'char' if one byte length 9588 // modifier is provided. 9589 if (ExprTy == S.Context.IntTy && 9590 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 9591 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 9592 ExprTy = S.Context.CharTy; 9593 } 9594 9595 // Look through enums to their underlying type. 9596 bool IsEnum = false; 9597 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 9598 ExprTy = EnumTy->getDecl()->getIntegerType(); 9599 IsEnum = true; 9600 } 9601 9602 // %C in an Objective-C context prints a unichar, not a wchar_t. 9603 // If the argument is an integer of some kind, believe the %C and suggest 9604 // a cast instead of changing the conversion specifier. 9605 QualType IntendedTy = ExprTy; 9606 if (isObjCContext() && 9607 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 9608 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 9609 !ExprTy->isCharType()) { 9610 // 'unichar' is defined as a typedef of unsigned short, but we should 9611 // prefer using the typedef if it is visible. 9612 IntendedTy = S.Context.UnsignedShortTy; 9613 9614 // While we are here, check if the value is an IntegerLiteral that happens 9615 // to be within the valid range. 9616 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 9617 const llvm::APInt &V = IL->getValue(); 9618 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 9619 return true; 9620 } 9621 9622 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 9623 Sema::LookupOrdinaryName); 9624 if (S.LookupName(Result, S.getCurScope())) { 9625 NamedDecl *ND = Result.getFoundDecl(); 9626 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 9627 if (TD->getUnderlyingType() == IntendedTy) 9628 IntendedTy = S.Context.getTypedefType(TD); 9629 } 9630 } 9631 } 9632 9633 // Special-case some of Darwin's platform-independence types by suggesting 9634 // casts to primitive types that are known to be large enough. 9635 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 9636 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 9637 QualType CastTy; 9638 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 9639 if (!CastTy.isNull()) { 9640 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 9641 // (long in ASTContext). Only complain to pedants. 9642 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 9643 (AT.isSizeT() || AT.isPtrdiffT()) && 9644 AT.matchesType(S.Context, CastTy)) 9645 Match = ArgType::NoMatchPedantic; 9646 IntendedTy = CastTy; 9647 ShouldNotPrintDirectly = true; 9648 } 9649 } 9650 9651 // We may be able to offer a FixItHint if it is a supported type. 
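  // Illustrative example (editor's addition): for printf("%d", someLong) the
  // specifier can be corrected to "%ld"; fixType() below attempts that
  // rewrite, and the corresponding fix-it is emitted if it succeeds.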
9652 PrintfSpecifier fixedFS = FS; 9653 bool Success = 9654 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 9655 9656 if (Success) { 9657 // Get the fix string from the fixed format specifier 9658 SmallString<16> buf; 9659 llvm::raw_svector_ostream os(buf); 9660 fixedFS.toString(os); 9661 9662 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 9663 9664 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 9665 unsigned Diag; 9666 switch (Match) { 9667 case ArgType::Match: llvm_unreachable("expected non-matching"); 9668 case ArgType::NoMatchPedantic: 9669 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9670 break; 9671 case ArgType::NoMatchTypeConfusion: 9672 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9673 break; 9674 case ArgType::NoMatch: 9675 Diag = diag::warn_format_conversion_argument_type_mismatch; 9676 break; 9677 } 9678 9679 // In this case, the specifier is wrong and should be changed to match 9680 // the argument. 9681 EmitFormatDiagnostic(S.PDiag(Diag) 9682 << AT.getRepresentativeTypeName(S.Context) 9683 << IntendedTy << IsEnum << E->getSourceRange(), 9684 E->getBeginLoc(), 9685 /*IsStringLocation*/ false, SpecRange, 9686 FixItHint::CreateReplacement(SpecRange, os.str())); 9687 } else { 9688 // The canonical type for formatting this value is different from the 9689 // actual type of the expression. (This occurs, for example, with Darwin's 9690 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 9691 // should be printed as 'long' for 64-bit compatibility.) 9692 // Rather than emitting a normal format/argument mismatch, we want to 9693 // add a cast to the recommended type (and correct the format string 9694 // if necessary). 9695 SmallString<16> CastBuf; 9696 llvm::raw_svector_ostream CastFix(CastBuf); 9697 CastFix << "("; 9698 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 9699 CastFix << ")"; 9700 9701 SmallVector<FixItHint,4> Hints; 9702 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 9703 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 9704 9705 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 9706 // If there's already a cast present, just replace it. 9707 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 9708 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 9709 9710 } else if (!requiresParensToAddCast(E)) { 9711 // If the expression has high enough precedence, 9712 // just write the C-style cast. 9713 Hints.push_back( 9714 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9715 } else { 9716 // Otherwise, add parens around the expression as well as the cast. 9717 CastFix << "("; 9718 Hints.push_back( 9719 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9720 9721 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 9722 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 9723 } 9724 9725 if (ShouldNotPrintDirectly) { 9726 // The expression has a type that should not be printed directly. 9727 // We extract the name from the typedef because we don't want to show 9728 // the underlying type in the diagnostic. 9729 StringRef Name; 9730 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 9731 Name = TypedefTy->getDecl()->getName(); 9732 else 9733 Name = CastTyName; 9734 unsigned Diag = Match == ArgType::NoMatchPedantic 9735 ? 
diag::warn_format_argument_needs_cast_pedantic 9736 : diag::warn_format_argument_needs_cast; 9737 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 9738 << E->getSourceRange(), 9739 E->getBeginLoc(), /*IsStringLocation=*/false, 9740 SpecRange, Hints); 9741 } else { 9742 // In this case, the expression could be printed using a different 9743 // specifier, but we've decided that the specifier is probably correct 9744 // and we should cast instead. Just use the normal warning message. 9745 EmitFormatDiagnostic( 9746 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9747 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 9748 << E->getSourceRange(), 9749 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 9750 } 9751 } 9752 } else { 9753 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 9754 SpecifierLen); 9755 // Since the warning for passing non-POD types to variadic functions 9756 // was deferred until now, we emit a warning for non-POD 9757 // arguments here. 9758 switch (S.isValidVarArgType(ExprTy)) { 9759 case Sema::VAK_Valid: 9760 case Sema::VAK_ValidInCXX11: { 9761 unsigned Diag; 9762 switch (Match) { 9763 case ArgType::Match: llvm_unreachable("expected non-matching"); 9764 case ArgType::NoMatchPedantic: 9765 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9766 break; 9767 case ArgType::NoMatchTypeConfusion: 9768 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9769 break; 9770 case ArgType::NoMatch: 9771 Diag = diag::warn_format_conversion_argument_type_mismatch; 9772 break; 9773 } 9774 9775 EmitFormatDiagnostic( 9776 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9777 << IsEnum << CSR << E->getSourceRange(), 9778 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9779 break; 9780 } 9781 case Sema::VAK_Undefined: 9782 case Sema::VAK_MSVCUndefined: 9783 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9784 << S.getLangOpts().CPlusPlus11 << ExprTy 9785 << CallType 9786 << AT.getRepresentativeTypeName(S.Context) << CSR 9787 << E->getSourceRange(), 9788 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9789 checkForCStrMembers(AT, E); 9790 break; 9791 9792 case Sema::VAK_Invalid: 9793 if (ExprTy->isObjCObjectType()) 9794 EmitFormatDiagnostic( 9795 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9796 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9797 << AT.getRepresentativeTypeName(S.Context) << CSR 9798 << E->getSourceRange(), 9799 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9800 else 9801 // FIXME: If this is an initializer list, suggest removing the braces 9802 // or inserting a cast to the target type. 
9803 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 9804 << isa<InitListExpr>(E) << ExprTy << CallType 9805 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 9806 break; 9807 } 9808 9809 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 9810 "format string specifier index out of range"); 9811 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 9812 } 9813 9814 return true; 9815 } 9816 9817 //===--- CHECK: Scanf format string checking ------------------------------===// 9818 9819 namespace { 9820 9821 class CheckScanfHandler : public CheckFormatHandler { 9822 public: 9823 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 9824 const Expr *origFormatExpr, Sema::FormatStringType type, 9825 unsigned firstDataArg, unsigned numDataArgs, 9826 const char *beg, bool hasVAListArg, 9827 ArrayRef<const Expr *> Args, unsigned formatIdx, 9828 bool inFunctionCall, Sema::VariadicCallType CallType, 9829 llvm::SmallBitVector &CheckedVarArgs, 9830 UncoveredArgHandler &UncoveredArg) 9831 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 9832 numDataArgs, beg, hasVAListArg, Args, formatIdx, 9833 inFunctionCall, CallType, CheckedVarArgs, 9834 UncoveredArg) {} 9835 9836 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 9837 const char *startSpecifier, 9838 unsigned specifierLen) override; 9839 9840 bool HandleInvalidScanfConversionSpecifier( 9841 const analyze_scanf::ScanfSpecifier &FS, 9842 const char *startSpecifier, 9843 unsigned specifierLen) override; 9844 9845 void HandleIncompleteScanList(const char *start, const char *end) override; 9846 }; 9847 9848 } // namespace 9849 9850 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 9851 const char *end) { 9852 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 9853 getLocationOfByte(end), /*IsStringLocation*/true, 9854 getSpecifierRange(start, end - start)); 9855 } 9856 9857 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 9858 const analyze_scanf::ScanfSpecifier &FS, 9859 const char *startSpecifier, 9860 unsigned specifierLen) { 9861 const analyze_scanf::ScanfConversionSpecifier &CS = 9862 FS.getConversionSpecifier(); 9863 9864 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9865 getLocationOfByte(CS.getStart()), 9866 startSpecifier, specifierLen, 9867 CS.getStart(), CS.getLength()); 9868 } 9869 9870 bool CheckScanfHandler::HandleScanfSpecifier( 9871 const analyze_scanf::ScanfSpecifier &FS, 9872 const char *startSpecifier, 9873 unsigned specifierLen) { 9874 using namespace analyze_scanf; 9875 using namespace analyze_format_string; 9876 9877 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 9878 9879 // Handle case where '%' and '*' don't consume an argument. These shouldn't 9880 // be used to decide if we are using positional arguments consistently. 9881 if (FS.consumesDataArgument()) { 9882 if (atFirstArg) { 9883 atFirstArg = false; 9884 usesPositionalArgs = FS.usesPositionalArg(); 9885 } 9886 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9887 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9888 startSpecifier, specifierLen); 9889 return false; 9890 } 9891 } 9892 9893 // Check if the field with is non-zero. 
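  // Illustrative example (editor's addition): an explicit zero field width,
  // as in scanf("%0d", &x), is diagnosed below and a fix-it removes the "0".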
9894 const OptionalAmount &Amt = FS.getFieldWidth(); 9895 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9896 if (Amt.getConstantAmount() == 0) { 9897 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9898 Amt.getConstantLength()); 9899 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9900 getLocationOfByte(Amt.getStart()), 9901 /*IsStringLocation*/true, R, 9902 FixItHint::CreateRemoval(R)); 9903 } 9904 } 9905 9906 if (!FS.consumesDataArgument()) { 9907 // FIXME: Technically specifying a precision or field width here 9908 // makes no sense. Worth issuing a warning at some point. 9909 return true; 9910 } 9911 9912 // Consume the argument. 9913 unsigned argIndex = FS.getArgIndex(); 9914 if (argIndex < NumDataArgs) { 9915 // The check to see if the argIndex is valid will come later. 9916 // We set the bit here because we may exit early from this 9917 // function if we encounter some other error. 9918 CoveredArgs.set(argIndex); 9919 } 9920 9921 // Check the length modifier is valid with the given conversion specifier. 9922 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9923 S.getLangOpts())) 9924 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9925 diag::warn_format_nonsensical_length); 9926 else if (!FS.hasStandardLengthModifier()) 9927 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9928 else if (!FS.hasStandardLengthConversionCombination()) 9929 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9930 diag::warn_format_non_standard_conversion_spec); 9931 9932 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9933 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9934 9935 // The remaining checks depend on the data arguments. 9936 if (HasVAListArg) 9937 return true; 9938 9939 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9940 return false; 9941 9942 // Check that the argument type matches the format specifier. 9943 const Expr *Ex = getDataArg(argIndex); 9944 if (!Ex) 9945 return true; 9946 9947 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9948 9949 if (!AT.isValid()) { 9950 return true; 9951 } 9952 9953 analyze_format_string::ArgType::MatchKind Match = 9954 AT.matchesType(S.Context, Ex->getType()); 9955 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9956 if (Match == analyze_format_string::ArgType::Match) 9957 return true; 9958 9959 ScanfSpecifier fixedFS = FS; 9960 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9961 S.getLangOpts(), S.Context); 9962 9963 unsigned Diag = 9964 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9965 : diag::warn_format_conversion_argument_type_mismatch; 9966 9967 if (Success) { 9968 // Get the fix string from the fixed format specifier. 
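    // Illustrative example (editor's addition): for scanf("%d", &someLong)
    // the fixed specifier would be "%ld", offered as a replacement fix-it.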
9969 SmallString<128> buf; 9970 llvm::raw_svector_ostream os(buf); 9971 fixedFS.toString(os); 9972 9973 EmitFormatDiagnostic( 9974 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9975 << Ex->getType() << false << Ex->getSourceRange(), 9976 Ex->getBeginLoc(), 9977 /*IsStringLocation*/ false, 9978 getSpecifierRange(startSpecifier, specifierLen), 9979 FixItHint::CreateReplacement( 9980 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9981 } else { 9982 EmitFormatDiagnostic(S.PDiag(Diag) 9983 << AT.getRepresentativeTypeName(S.Context) 9984 << Ex->getType() << false << Ex->getSourceRange(), 9985 Ex->getBeginLoc(), 9986 /*IsStringLocation*/ false, 9987 getSpecifierRange(startSpecifier, specifierLen)); 9988 } 9989 9990 return true; 9991 } 9992 9993 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9994 const Expr *OrigFormatExpr, 9995 ArrayRef<const Expr *> Args, 9996 bool HasVAListArg, unsigned format_idx, 9997 unsigned firstDataArg, 9998 Sema::FormatStringType Type, 9999 bool inFunctionCall, 10000 Sema::VariadicCallType CallType, 10001 llvm::SmallBitVector &CheckedVarArgs, 10002 UncoveredArgHandler &UncoveredArg, 10003 bool IgnoreStringsWithoutSpecifiers) { 10004 // CHECK: is the format string a wide literal? 10005 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 10006 CheckFormatHandler::EmitFormatDiagnostic( 10007 S, inFunctionCall, Args[format_idx], 10008 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 10009 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10010 return; 10011 } 10012 10013 // Str - The format string. NOTE: this is NOT null-terminated! 10014 StringRef StrRef = FExpr->getString(); 10015 const char *Str = StrRef.data(); 10016 // Account for cases where the string literal is truncated in a declaration. 10017 const ConstantArrayType *T = 10018 S.Context.getAsConstantArrayType(FExpr->getType()); 10019 assert(T && "String literal not of constant array type!"); 10020 size_t TypeSize = T->getSize().getZExtValue(); 10021 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10022 const unsigned numDataArgs = Args.size() - firstDataArg; 10023 10024 if (IgnoreStringsWithoutSpecifiers && 10025 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 10026 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10027 return; 10028 10029 // Emit a warning if the string literal is truncated and does not contain an 10030 // embedded null character. 10031 if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) { 10032 CheckFormatHandler::EmitFormatDiagnostic( 10033 S, inFunctionCall, Args[format_idx], 10034 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 10035 FExpr->getBeginLoc(), 10036 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 10037 return; 10038 } 10039 10040 // CHECK: empty format string? 
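  // Illustrative example (editor's addition): a call such as printf("", x)
  // has an empty format string but one data argument, which is diagnosed by
  // the check below.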
10041 if (StrLen == 0 && numDataArgs > 0) { 10042 CheckFormatHandler::EmitFormatDiagnostic( 10043 S, inFunctionCall, Args[format_idx], 10044 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 10045 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10046 return; 10047 } 10048 10049 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 10050 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog || 10051 Type == Sema::FST_OSTrace) { 10052 CheckPrintfHandler H( 10053 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 10054 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, 10055 HasVAListArg, Args, format_idx, inFunctionCall, CallType, 10056 CheckedVarArgs, UncoveredArg); 10057 10058 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, 10059 S.getLangOpts(), 10060 S.Context.getTargetInfo(), 10061 Type == Sema::FST_FreeBSDKPrintf)) 10062 H.DoneProcessing(); 10063 } else if (Type == Sema::FST_Scanf) { 10064 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 10065 numDataArgs, Str, HasVAListArg, Args, format_idx, 10066 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg); 10067 10068 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, 10069 S.getLangOpts(), 10070 S.Context.getTargetInfo())) 10071 H.DoneProcessing(); 10072 } // TODO: handle other formats 10073 } 10074 10075 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 10076 // Str - The format string. NOTE: this is NOT null-terminated! 10077 StringRef StrRef = FExpr->getString(); 10078 const char *Str = StrRef.data(); 10079 // Account for cases where the string literal is truncated in a declaration. 10080 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 10081 assert(T && "String literal not of constant array type!"); 10082 size_t TypeSize = T->getSize().getZExtValue(); 10083 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 10084 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 10085 getLangOpts(), 10086 Context.getTargetInfo()); 10087 } 10088 10089 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 10090 10091 // Returns the related absolute value function that is larger, of 0 if one 10092 // does not exist. 
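// (Illustrative note, editor's addition: the chains walked here are
// abs -> labs -> llabs, fabsf -> fabs -> fabsl, and cabsf -> cabs -> cabsl,
// for both the library functions and their __builtin_ equivalents.)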
10093 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 10094 switch (AbsFunction) { 10095 default: 10096 return 0; 10097 10098 case Builtin::BI__builtin_abs: 10099 return Builtin::BI__builtin_labs; 10100 case Builtin::BI__builtin_labs: 10101 return Builtin::BI__builtin_llabs; 10102 case Builtin::BI__builtin_llabs: 10103 return 0; 10104 10105 case Builtin::BI__builtin_fabsf: 10106 return Builtin::BI__builtin_fabs; 10107 case Builtin::BI__builtin_fabs: 10108 return Builtin::BI__builtin_fabsl; 10109 case Builtin::BI__builtin_fabsl: 10110 return 0; 10111 10112 case Builtin::BI__builtin_cabsf: 10113 return Builtin::BI__builtin_cabs; 10114 case Builtin::BI__builtin_cabs: 10115 return Builtin::BI__builtin_cabsl; 10116 case Builtin::BI__builtin_cabsl: 10117 return 0; 10118 10119 case Builtin::BIabs: 10120 return Builtin::BIlabs; 10121 case Builtin::BIlabs: 10122 return Builtin::BIllabs; 10123 case Builtin::BIllabs: 10124 return 0; 10125 10126 case Builtin::BIfabsf: 10127 return Builtin::BIfabs; 10128 case Builtin::BIfabs: 10129 return Builtin::BIfabsl; 10130 case Builtin::BIfabsl: 10131 return 0; 10132 10133 case Builtin::BIcabsf: 10134 return Builtin::BIcabs; 10135 case Builtin::BIcabs: 10136 return Builtin::BIcabsl; 10137 case Builtin::BIcabsl: 10138 return 0; 10139 } 10140 } 10141 10142 // Returns the argument type of the absolute value function. 10143 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 10144 unsigned AbsType) { 10145 if (AbsType == 0) 10146 return QualType(); 10147 10148 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 10149 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 10150 if (Error != ASTContext::GE_None) 10151 return QualType(); 10152 10153 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 10154 if (!FT) 10155 return QualType(); 10156 10157 if (FT->getNumParams() != 1) 10158 return QualType(); 10159 10160 return FT->getParamType(0); 10161 } 10162 10163 // Returns the best absolute value function, or zero, based on type and 10164 // current absolute value function. 10165 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 10166 unsigned AbsFunctionKind) { 10167 unsigned BestKind = 0; 10168 uint64_t ArgSize = Context.getTypeSize(ArgType); 10169 for (unsigned Kind = AbsFunctionKind; Kind != 0; 10170 Kind = getLargerAbsoluteValueFunction(Kind)) { 10171 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 10172 if (Context.getTypeSize(ParamType) >= ArgSize) { 10173 if (BestKind == 0) 10174 BestKind = Kind; 10175 else if (Context.hasSameType(ParamType, ArgType)) { 10176 BestKind = Kind; 10177 break; 10178 } 10179 } 10180 } 10181 return BestKind; 10182 } 10183 10184 enum AbsoluteValueKind { 10185 AVK_Integer, 10186 AVK_Floating, 10187 AVK_Complex 10188 }; 10189 10190 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 10191 if (T->isIntegralOrEnumerationType()) 10192 return AVK_Integer; 10193 if (T->isRealFloatingType()) 10194 return AVK_Floating; 10195 if (T->isAnyComplexType()) 10196 return AVK_Complex; 10197 10198 llvm_unreachable("Type not integer, floating, or complex"); 10199 } 10200 10201 // Changes the absolute value function to a different type. Preserves whether 10202 // the function is a builtin. 
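// (Illustrative note, editor's addition: e.g.
// changeAbsFunction(Builtin::BIfabsf, AVK_Integer) yields Builtin::BIabs,
// i.e. a floating-point abs applied to an integer argument is redirected to
// the integer family.)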
10203 static unsigned changeAbsFunction(unsigned AbsKind, 10204 AbsoluteValueKind ValueKind) { 10205 switch (ValueKind) { 10206 case AVK_Integer: 10207 switch (AbsKind) { 10208 default: 10209 return 0; 10210 case Builtin::BI__builtin_fabsf: 10211 case Builtin::BI__builtin_fabs: 10212 case Builtin::BI__builtin_fabsl: 10213 case Builtin::BI__builtin_cabsf: 10214 case Builtin::BI__builtin_cabs: 10215 case Builtin::BI__builtin_cabsl: 10216 return Builtin::BI__builtin_abs; 10217 case Builtin::BIfabsf: 10218 case Builtin::BIfabs: 10219 case Builtin::BIfabsl: 10220 case Builtin::BIcabsf: 10221 case Builtin::BIcabs: 10222 case Builtin::BIcabsl: 10223 return Builtin::BIabs; 10224 } 10225 case AVK_Floating: 10226 switch (AbsKind) { 10227 default: 10228 return 0; 10229 case Builtin::BI__builtin_abs: 10230 case Builtin::BI__builtin_labs: 10231 case Builtin::BI__builtin_llabs: 10232 case Builtin::BI__builtin_cabsf: 10233 case Builtin::BI__builtin_cabs: 10234 case Builtin::BI__builtin_cabsl: 10235 return Builtin::BI__builtin_fabsf; 10236 case Builtin::BIabs: 10237 case Builtin::BIlabs: 10238 case Builtin::BIllabs: 10239 case Builtin::BIcabsf: 10240 case Builtin::BIcabs: 10241 case Builtin::BIcabsl: 10242 return Builtin::BIfabsf; 10243 } 10244 case AVK_Complex: 10245 switch (AbsKind) { 10246 default: 10247 return 0; 10248 case Builtin::BI__builtin_abs: 10249 case Builtin::BI__builtin_labs: 10250 case Builtin::BI__builtin_llabs: 10251 case Builtin::BI__builtin_fabsf: 10252 case Builtin::BI__builtin_fabs: 10253 case Builtin::BI__builtin_fabsl: 10254 return Builtin::BI__builtin_cabsf; 10255 case Builtin::BIabs: 10256 case Builtin::BIlabs: 10257 case Builtin::BIllabs: 10258 case Builtin::BIfabsf: 10259 case Builtin::BIfabs: 10260 case Builtin::BIfabsl: 10261 return Builtin::BIcabsf; 10262 } 10263 } 10264 llvm_unreachable("Unable to convert function"); 10265 } 10266 10267 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10268 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 10269 if (!FnInfo) 10270 return 0; 10271 10272 switch (FDecl->getBuiltinID()) { 10273 default: 10274 return 0; 10275 case Builtin::BI__builtin_abs: 10276 case Builtin::BI__builtin_fabs: 10277 case Builtin::BI__builtin_fabsf: 10278 case Builtin::BI__builtin_fabsl: 10279 case Builtin::BI__builtin_labs: 10280 case Builtin::BI__builtin_llabs: 10281 case Builtin::BI__builtin_cabs: 10282 case Builtin::BI__builtin_cabsf: 10283 case Builtin::BI__builtin_cabsl: 10284 case Builtin::BIabs: 10285 case Builtin::BIlabs: 10286 case Builtin::BIllabs: 10287 case Builtin::BIfabs: 10288 case Builtin::BIfabsf: 10289 case Builtin::BIfabsl: 10290 case Builtin::BIcabs: 10291 case Builtin::BIcabsf: 10292 case Builtin::BIcabsl: 10293 return FDecl->getBuiltinID(); 10294 } 10295 llvm_unreachable("Unknown Builtin type"); 10296 } 10297 10298 // If the replacement is valid, emit a note with replacement function. 10299 // Additionally, suggest including the proper header if not already included. 
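// (Illustrative note, editor's addition: in C++, a call like abs(2.0) gets a
// note suggesting std::abs, together with a hint to include <cmath> if no
// suitable declaration of std::abs is visible at the call site.)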
10300 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10301 unsigned AbsKind, QualType ArgType) { 10302 bool EmitHeaderHint = true; 10303 const char *HeaderName = nullptr; 10304 const char *FunctionName = nullptr; 10305 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10306 FunctionName = "std::abs"; 10307 if (ArgType->isIntegralOrEnumerationType()) { 10308 HeaderName = "cstdlib"; 10309 } else if (ArgType->isRealFloatingType()) { 10310 HeaderName = "cmath"; 10311 } else { 10312 llvm_unreachable("Invalid Type"); 10313 } 10314 10315 // Lookup all std::abs 10316 if (NamespaceDecl *Std = S.getStdNamespace()) { 10317 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10318 R.suppressDiagnostics(); 10319 S.LookupQualifiedName(R, Std); 10320 10321 for (const auto *I : R) { 10322 const FunctionDecl *FDecl = nullptr; 10323 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10324 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10325 } else { 10326 FDecl = dyn_cast<FunctionDecl>(I); 10327 } 10328 if (!FDecl) 10329 continue; 10330 10331 // Found std::abs(), check that they are the right ones. 10332 if (FDecl->getNumParams() != 1) 10333 continue; 10334 10335 // Check that the parameter type can handle the argument. 10336 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10337 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10338 S.Context.getTypeSize(ArgType) <= 10339 S.Context.getTypeSize(ParamType)) { 10340 // Found a function, don't need the header hint. 10341 EmitHeaderHint = false; 10342 break; 10343 } 10344 } 10345 } 10346 } else { 10347 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10348 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10349 10350 if (HeaderName) { 10351 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10352 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10353 R.suppressDiagnostics(); 10354 S.LookupName(R, S.getCurScope()); 10355 10356 if (R.isSingleResult()) { 10357 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10358 if (FD && FD->getBuiltinID() == AbsKind) { 10359 EmitHeaderHint = false; 10360 } else { 10361 return; 10362 } 10363 } else if (!R.empty()) { 10364 return; 10365 } 10366 } 10367 } 10368 10369 S.Diag(Loc, diag::note_replace_abs_function) 10370 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10371 10372 if (!HeaderName) 10373 return; 10374 10375 if (!EmitHeaderHint) 10376 return; 10377 10378 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10379 << FunctionName; 10380 } 10381 10382 template <std::size_t StrLen> 10383 static bool IsStdFunction(const FunctionDecl *FDecl, 10384 const char (&Str)[StrLen]) { 10385 if (!FDecl) 10386 return false; 10387 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10388 return false; 10389 if (!FDecl->isInStdNamespace()) 10390 return false; 10391 10392 return true; 10393 } 10394 10395 // Warn when using the wrong abs() function. 
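// (Illustrative examples, editor's addition: abs(someUnsigned) warns that the
// call has no effect and suggests removing it, abs(somePointer) warns that
// taking the absolute value of a pointer is suspicious, and abs(someDouble)
// warns about the wrong kind of abs and suggests a floating-point
// replacement.)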
10396 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 10397 const FunctionDecl *FDecl) { 10398 if (Call->getNumArgs() != 1) 10399 return; 10400 10401 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 10402 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 10403 if (AbsKind == 0 && !IsStdAbs) 10404 return; 10405 10406 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10407 QualType ParamType = Call->getArg(0)->getType(); 10408 10409 // Unsigned types cannot be negative. Suggest removing the absolute value 10410 // function call. 10411 if (ArgType->isUnsignedIntegerType()) { 10412 const char *FunctionName = 10413 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 10414 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 10415 Diag(Call->getExprLoc(), diag::note_remove_abs) 10416 << FunctionName 10417 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 10418 return; 10419 } 10420 10421 // Taking the absolute value of a pointer is very suspicious, they probably 10422 // wanted to index into an array, dereference a pointer, call a function, etc. 10423 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 10424 unsigned DiagType = 0; 10425 if (ArgType->isFunctionType()) 10426 DiagType = 1; 10427 else if (ArgType->isArrayType()) 10428 DiagType = 2; 10429 10430 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 10431 return; 10432 } 10433 10434 // std::abs has overloads which prevent most of the absolute value problems 10435 // from occurring. 10436 if (IsStdAbs) 10437 return; 10438 10439 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 10440 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 10441 10442 // The argument and parameter are the same kind. Check if they are the right 10443 // size. 10444 if (ArgValueKind == ParamValueKind) { 10445 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 10446 return; 10447 10448 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 10449 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 10450 << FDecl << ArgType << ParamType; 10451 10452 if (NewAbsKind == 0) 10453 return; 10454 10455 emitReplacement(*this, Call->getExprLoc(), 10456 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10457 return; 10458 } 10459 10460 // ArgValueKind != ParamValueKind 10461 // The wrong type of absolute value function was used. Attempt to find the 10462 // proper one. 10463 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 10464 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 10465 if (NewAbsKind == 0) 10466 return; 10467 10468 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 10469 << FDecl << ParamValueKind << ArgValueKind; 10470 10471 emitReplacement(*this, Call->getExprLoc(), 10472 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10473 } 10474 10475 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 10476 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 10477 const FunctionDecl *FDecl) { 10478 if (!Call || !FDecl) return; 10479 10480 // Ignore template specializations and macros. 
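  // (Illustrative note, editor's addition: the pattern of interest is e.g.
  // std::max(0u, Len), which always evaluates to 'Len'; macro expansions and
  // template instantiations are skipped, presumably to avoid noise from
  // generic code.)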
10481 if (inTemplateInstantiation()) return; 10482 if (Call->getExprLoc().isMacroID()) return; 10483 10484 // Only care about the one template argument, two function parameter std::max 10485 if (Call->getNumArgs() != 2) return; 10486 if (!IsStdFunction(FDecl, "max")) return; 10487 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10488 if (!ArgList) return; 10489 if (ArgList->size() != 1) return; 10490 10491 // Check that template type argument is unsigned integer. 10492 const auto& TA = ArgList->get(0); 10493 if (TA.getKind() != TemplateArgument::Type) return; 10494 QualType ArgType = TA.getAsType(); 10495 if (!ArgType->isUnsignedIntegerType()) return; 10496 10497 // See if either argument is a literal zero. 10498 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10499 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10500 if (!MTE) return false; 10501 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10502 if (!Num) return false; 10503 if (Num->getValue() != 0) return false; 10504 return true; 10505 }; 10506 10507 const Expr *FirstArg = Call->getArg(0); 10508 const Expr *SecondArg = Call->getArg(1); 10509 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10510 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10511 10512 // Only warn when exactly one argument is zero. 10513 if (IsFirstArgZero == IsSecondArgZero) return; 10514 10515 SourceRange FirstRange = FirstArg->getSourceRange(); 10516 SourceRange SecondRange = SecondArg->getSourceRange(); 10517 10518 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10519 10520 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10521 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10522 10523 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10524 SourceRange RemovalRange; 10525 if (IsFirstArgZero) { 10526 RemovalRange = SourceRange(FirstRange.getBegin(), 10527 SecondRange.getBegin().getLocWithOffset(-1)); 10528 } else { 10529 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10530 SecondRange.getEnd()); 10531 } 10532 10533 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10534 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10535 << FixItHint::CreateRemoval(RemovalRange); 10536 } 10537 10538 //===--- CHECK: Standard memory functions ---------------------------------===// 10539 10540 /// Takes the expression passed to the size_t parameter of functions 10541 /// such as memcmp, strncat, etc and warns if it's a comparison. 10542 /// 10543 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
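/// The call above was presumably meant to be `if (memcmp(&a, &b, sizeof(a)) > 0)`.
/// When this pattern is found, the notes emitted below offer two fix-its:
/// move the call's closing parenthesis so the comparison happens outside the
/// call, or cast the comparison to (size_t) to silence the warning when it
/// really was intended as the size argument.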
10544 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10545 IdentifierInfo *FnName, 10546 SourceLocation FnLoc, 10547 SourceLocation RParenLoc) { 10548 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10549 if (!Size) 10550 return false; 10551 10552 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10553 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10554 return false; 10555 10556 SourceRange SizeRange = Size->getSourceRange(); 10557 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10558 << SizeRange << FnName; 10559 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10560 << FnName 10561 << FixItHint::CreateInsertion( 10562 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10563 << FixItHint::CreateRemoval(RParenLoc); 10564 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10565 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10566 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10567 ")"); 10568 10569 return true; 10570 } 10571 10572 /// Determine whether the given type is or contains a dynamic class type 10573 /// (e.g., whether it has a vtable). 10574 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10575 bool &IsContained) { 10576 // Look through array types while ignoring qualifiers. 10577 const Type *Ty = T->getBaseElementTypeUnsafe(); 10578 IsContained = false; 10579 10580 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10581 RD = RD ? RD->getDefinition() : nullptr; 10582 if (!RD || RD->isInvalidDecl()) 10583 return nullptr; 10584 10585 if (RD->isDynamicClass()) 10586 return RD; 10587 10588 // Check all the fields. If any bases were dynamic, the class is dynamic. 10589 // It's impossible for a class to transitively contain itself by value, so 10590 // infinite recursion is impossible. 10591 for (auto *FD : RD->fields()) { 10592 bool SubContained; 10593 if (const CXXRecordDecl *ContainedRD = 10594 getContainedDynamicClass(FD->getType(), SubContained)) { 10595 IsContained = true; 10596 return ContainedRD; 10597 } 10598 } 10599 10600 return nullptr; 10601 } 10602 10603 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 10604 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 10605 if (Unary->getKind() == UETT_SizeOf) 10606 return Unary; 10607 return nullptr; 10608 } 10609 10610 /// If E is a sizeof expression, returns its argument expression, 10611 /// otherwise returns NULL. 10612 static const Expr *getSizeOfExprArg(const Expr *E) { 10613 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10614 if (!SizeOf->isArgumentType()) 10615 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 10616 return nullptr; 10617 } 10618 10619 /// If E is a sizeof expression, returns its argument type. 
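/// For example, both `sizeof(int)` and `sizeof x` (for an `int x`) yield `int`
/// here; getTypeOfArgument() handles the expression form as well.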
10620 static QualType getSizeOfArgType(const Expr *E) { 10621 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10622 return SizeOf->getTypeOfArgument(); 10623 return QualType(); 10624 } 10625 10626 namespace { 10627 10628 struct SearchNonTrivialToInitializeField 10629 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 10630 using Super = 10631 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 10632 10633 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 10634 10635 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 10636 SourceLocation SL) { 10637 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10638 asDerived().visitArray(PDIK, AT, SL); 10639 return; 10640 } 10641 10642 Super::visitWithKind(PDIK, FT, SL); 10643 } 10644 10645 void visitARCStrong(QualType FT, SourceLocation SL) { 10646 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10647 } 10648 void visitARCWeak(QualType FT, SourceLocation SL) { 10649 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10650 } 10651 void visitStruct(QualType FT, SourceLocation SL) { 10652 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10653 visit(FD->getType(), FD->getLocation()); 10654 } 10655 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 10656 const ArrayType *AT, SourceLocation SL) { 10657 visit(getContext().getBaseElementType(AT), SL); 10658 } 10659 void visitTrivial(QualType FT, SourceLocation SL) {} 10660 10661 static void diag(QualType RT, const Expr *E, Sema &S) { 10662 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 10663 } 10664 10665 ASTContext &getContext() { return S.getASTContext(); } 10666 10667 const Expr *E; 10668 Sema &S; 10669 }; 10670 10671 struct SearchNonTrivialToCopyField 10672 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 10673 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 10674 10675 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 10676 10677 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 10678 SourceLocation SL) { 10679 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10680 asDerived().visitArray(PCK, AT, SL); 10681 return; 10682 } 10683 10684 Super::visitWithKind(PCK, FT, SL); 10685 } 10686 10687 void visitARCStrong(QualType FT, SourceLocation SL) { 10688 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10689 } 10690 void visitARCWeak(QualType FT, SourceLocation SL) { 10691 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10692 } 10693 void visitStruct(QualType FT, SourceLocation SL) { 10694 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10695 visit(FD->getType(), FD->getLocation()); 10696 } 10697 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 10698 SourceLocation SL) { 10699 visit(getContext().getBaseElementType(AT), SL); 10700 } 10701 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 10702 SourceLocation SL) {} 10703 void visitTrivial(QualType FT, SourceLocation SL) {} 10704 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 10705 10706 static void diag(QualType RT, const Expr *E, Sema &S) { 10707 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 10708 } 10709 10710 ASTContext &getContext() { return S.getASTContext(); } 10711 10712 const Expr *E; 10713 Sema &S; 10714 
}; 10715 10716 } 10717 10718 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 10719 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 10720 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 10721 10722 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 10723 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 10724 return false; 10725 10726 return doesExprLikelyComputeSize(BO->getLHS()) || 10727 doesExprLikelyComputeSize(BO->getRHS()); 10728 } 10729 10730 return getAsSizeOfExpr(SizeofExpr) != nullptr; 10731 } 10732 10733 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 10734 /// 10735 /// \code 10736 /// #define MACRO 0 10737 /// foo(MACRO); 10738 /// foo(0); 10739 /// \endcode 10740 /// 10741 /// This should return true for the first call to foo, but not for the second 10742 /// (regardless of whether foo is a macro or function). 10743 static bool isArgumentExpandedFromMacro(SourceManager &SM, 10744 SourceLocation CallLoc, 10745 SourceLocation ArgLoc) { 10746 if (!CallLoc.isMacroID()) 10747 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 10748 10749 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 10750 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 10751 } 10752 10753 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10754 /// last two arguments transposed. 10755 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10756 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10757 return; 10758 10759 const Expr *SizeArg = 10760 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10761 10762 auto isLiteralZero = [](const Expr *E) { 10763 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10764 }; 10765 10766 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 10767 SourceLocation CallLoc = Call->getRParenLoc(); 10768 SourceManager &SM = S.getSourceManager(); 10769 if (isLiteralZero(SizeArg) && 10770 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10771 10772 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10773 10774 // Some platforms #define bzero to __builtin_memset. See if this is the 10775 // case, and if so, emit a better diagnostic. 10776 if (BId == Builtin::BIbzero || 10777 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10778 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10779 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10780 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10781 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10782 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10783 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10784 } 10785 return; 10786 } 10787 10788 // If the second argument to a memset is a sizeof expression and the third 10789 // isn't, this is also likely an error. This should catch 10790 // 'memset(buf, sizeof(buf), 0xff)'. 10791 if (BId == Builtin::BImemset && 10792 doesExprLikelyComputeSize(Call->getArg(1)) && 10793 !doesExprLikelyComputeSize(Call->getArg(2))) { 10794 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10795 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10796 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10797 return; 10798 } 10799 } 10800 10801 /// Check for dangerous or invalid arguments to memset(). 
10802 /// 10803 /// This issues warnings on known problematic, dangerous or unspecified 10804 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10805 /// function calls. 10806 /// 10807 /// \param Call The call expression to diagnose. 10808 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10809 unsigned BId, 10810 IdentifierInfo *FnName) { 10811 assert(BId != 0); 10812 10813 // It is possible to have a non-standard definition of memset. Validate 10814 // we have enough arguments, and if not, abort further checking. 10815 unsigned ExpectedNumArgs = 10816 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10817 if (Call->getNumArgs() < ExpectedNumArgs) 10818 return; 10819 10820 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10821 BId == Builtin::BIstrndup ? 1 : 2); 10822 unsigned LenArg = 10823 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10824 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10825 10826 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10827 Call->getBeginLoc(), Call->getRParenLoc())) 10828 return; 10829 10830 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10831 CheckMemaccessSize(*this, BId, Call); 10832 10833 // We have special checking when the length is a sizeof expression. 10834 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10835 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10836 llvm::FoldingSetNodeID SizeOfArgID; 10837 10838 // Although widely used, 'bzero' is not a standard function. Be more strict 10839 // with the argument types before allowing diagnostics and only allow the 10840 // form bzero(ptr, sizeof(...)). 10841 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10842 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10843 return; 10844 10845 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10846 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10847 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10848 10849 QualType DestTy = Dest->getType(); 10850 QualType PointeeTy; 10851 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10852 PointeeTy = DestPtrTy->getPointeeType(); 10853 10854 // Never warn about void type pointers. This can be used to suppress 10855 // false positives. 10856 if (PointeeTy->isVoidType()) 10857 continue; 10858 10859 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10860 // actually comparing the expressions for equality. Because computing the 10861 // expression IDs can be expensive, we only do this if the diagnostic is 10862 // enabled. 10863 if (SizeOfArg && 10864 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10865 SizeOfArg->getExprLoc())) { 10866 // We only compute IDs for expressions if the warning is enabled, and 10867 // cache the sizeof arg's ID. 10868 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10869 SizeOfArg->Profile(SizeOfArgID, Context, true); 10870 llvm::FoldingSetNodeID DestID; 10871 Dest->Profile(DestID, Context, true); 10872 if (DestID == SizeOfArgID) { 10873 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10874 // over sizeof(src) as well. 10875 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 
10876 StringRef ReadableName = FnName->getName(); 10877 10878 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10879 if (UnaryOp->getOpcode() == UO_AddrOf) 10880 ActionIdx = 1; // If its an address-of operator, just remove it. 10881 if (!PointeeTy->isIncompleteType() && 10882 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10883 ActionIdx = 2; // If the pointee's size is sizeof(char), 10884 // suggest an explicit length. 10885 10886 // If the function is defined as a builtin macro, do not show macro 10887 // expansion. 10888 SourceLocation SL = SizeOfArg->getExprLoc(); 10889 SourceRange DSR = Dest->getSourceRange(); 10890 SourceRange SSR = SizeOfArg->getSourceRange(); 10891 SourceManager &SM = getSourceManager(); 10892 10893 if (SM.isMacroArgExpansion(SL)) { 10894 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10895 SL = SM.getSpellingLoc(SL); 10896 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10897 SM.getSpellingLoc(DSR.getEnd())); 10898 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10899 SM.getSpellingLoc(SSR.getEnd())); 10900 } 10901 10902 DiagRuntimeBehavior(SL, SizeOfArg, 10903 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10904 << ReadableName 10905 << PointeeTy 10906 << DestTy 10907 << DSR 10908 << SSR); 10909 DiagRuntimeBehavior(SL, SizeOfArg, 10910 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10911 << ActionIdx 10912 << SSR); 10913 10914 break; 10915 } 10916 } 10917 10918 // Also check for cases where the sizeof argument is the exact same 10919 // type as the memory argument, and where it points to a user-defined 10920 // record type. 10921 if (SizeOfArgTy != QualType()) { 10922 if (PointeeTy->isRecordType() && 10923 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10924 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10925 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10926 << FnName << SizeOfArgTy << ArgIdx 10927 << PointeeTy << Dest->getSourceRange() 10928 << LenExpr->getSourceRange()); 10929 break; 10930 } 10931 } 10932 } else if (DestTy->isArrayType()) { 10933 PointeeTy = DestTy; 10934 } 10935 10936 if (PointeeTy == QualType()) 10937 continue; 10938 10939 // Always complain about dynamic classes. 10940 bool IsContained; 10941 if (const CXXRecordDecl *ContainedRD = 10942 getContainedDynamicClass(PointeeTy, IsContained)) { 10943 10944 unsigned OperationType = 0; 10945 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10946 // "overwritten" if we're warning about the destination for any call 10947 // but memcmp; otherwise a verb appropriate to the call. 10948 if (ArgIdx != 0 || IsCmp) { 10949 if (BId == Builtin::BImemcpy) 10950 OperationType = 1; 10951 else if(BId == Builtin::BImemmove) 10952 OperationType = 2; 10953 else if (IsCmp) 10954 OperationType = 3; 10955 } 10956 10957 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10958 PDiag(diag::warn_dyn_class_memaccess) 10959 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 10960 << IsContained << ContainedRD << OperationType 10961 << Call->getCallee()->getSourceRange()); 10962 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10963 BId != Builtin::BImemset) 10964 DiagRuntimeBehavior( 10965 Dest->getExprLoc(), Dest, 10966 PDiag(diag::warn_arc_object_memaccess) 10967 << ArgIdx << FnName << PointeeTy 10968 << Call->getCallee()->getSourceRange()); 10969 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10970 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10971 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10972 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10973 PDiag(diag::warn_cstruct_memaccess) 10974 << ArgIdx << FnName << PointeeTy << 0); 10975 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10976 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10977 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10978 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10979 PDiag(diag::warn_cstruct_memaccess) 10980 << ArgIdx << FnName << PointeeTy << 1); 10981 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10982 } else { 10983 continue; 10984 } 10985 } else 10986 continue; 10987 10988 DiagRuntimeBehavior( 10989 Dest->getExprLoc(), Dest, 10990 PDiag(diag::note_bad_memaccess_silence) 10991 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10992 break; 10993 } 10994 } 10995 10996 // A little helper routine: ignore addition and subtraction of integer literals. 10997 // This intentionally does not ignore all integer constant expressions because 10998 // we don't want to remove sizeof(). 10999 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11000 Ex = Ex->IgnoreParenCasts(); 11001 11002 while (true) { 11003 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11004 if (!BO || !BO->isAdditiveOp()) 11005 break; 11006 11007 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11008 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11009 11010 if (isa<IntegerLiteral>(RHS)) 11011 Ex = LHS; 11012 else if (isa<IntegerLiteral>(LHS)) 11013 Ex = RHS; 11014 else 11015 break; 11016 } 11017 11018 return Ex; 11019 } 11020 11021 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11022 ASTContext &Context) { 11023 // Only handle constant-sized or VLAs, but not flexible members. 11024 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11025 // Only issue the FIXIT for arrays of size > 1. 11026 if (CAT->getSize().getSExtValue() <= 1) 11027 return false; 11028 } else if (!Ty->isVariableArrayType()) { 11029 return false; 11030 } 11031 return true; 11032 } 11033 11034 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11035 // be the size of the source, instead of the destination. 
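// Illustrative examples of the patterns matched below (not exhaustive):
//
//   strlcpy(dst, src, sizeof(src));   // size taken from the source
//   strlcpy(dst, src, strlen(src));   // same mistake spelled via strlen
//
// When the destination is an array of known size, a fix-it suggesting
// 'sizeof(dst)' may also be attached to the note.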
11036 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11037 IdentifierInfo *FnName) { 11038 11039 // Don't crash if the user has the wrong number of arguments 11040 unsigned NumArgs = Call->getNumArgs(); 11041 if ((NumArgs != 3) && (NumArgs != 4)) 11042 return; 11043 11044 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11045 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11046 const Expr *CompareWithSrc = nullptr; 11047 11048 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11049 Call->getBeginLoc(), Call->getRParenLoc())) 11050 return; 11051 11052 // Look for 'strlcpy(dst, x, sizeof(x))' 11053 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11054 CompareWithSrc = Ex; 11055 else { 11056 // Look for 'strlcpy(dst, x, strlen(x))' 11057 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11058 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11059 SizeCall->getNumArgs() == 1) 11060 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11061 } 11062 } 11063 11064 if (!CompareWithSrc) 11065 return; 11066 11067 // Determine if the argument to sizeof/strlen is equal to the source 11068 // argument. In principle there's all kinds of things you could do 11069 // here, for instance creating an == expression and evaluating it with 11070 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11071 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11072 if (!SrcArgDRE) 11073 return; 11074 11075 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11076 if (!CompareWithSrcDRE || 11077 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11078 return; 11079 11080 const Expr *OriginalSizeArg = Call->getArg(2); 11081 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11082 << OriginalSizeArg->getSourceRange() << FnName; 11083 11084 // Output a FIXIT hint if the destination is an array (rather than a 11085 // pointer to an array). This could be enhanced to handle some 11086 // pointers if we know the actual size, like if DstArg is 'array+2' 11087 // we could say 'sizeof(array)-2'. 11088 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11089 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11090 return; 11091 11092 SmallString<128> sizeString; 11093 llvm::raw_svector_ostream OS(sizeString); 11094 OS << "sizeof("; 11095 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11096 OS << ")"; 11097 11098 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11099 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11100 OS.str()); 11101 } 11102 11103 /// Check if two expressions refer to the same declaration. 11104 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 11105 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 11106 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 11107 return D1->getDecl() == D2->getDecl(); 11108 return false; 11109 } 11110 11111 static const Expr *getStrlenExprArg(const Expr *E) { 11112 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 11113 const FunctionDecl *FD = CE->getDirectCallee(); 11114 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 11115 return nullptr; 11116 return CE->getArg(0)->IgnoreParenCasts(); 11117 } 11118 return nullptr; 11119 } 11120 11121 // Warn on anti-patterns as the 'size' argument to strncat. 
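// Commonly seen mistakes include sizeof(dst), sizeof(src), and
// sizeof(dst) - strlen(dst), each of which can overflow the destination.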
11122 // The correct size argument should look like the following:
11123 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
11124 void Sema::CheckStrncatArguments(const CallExpr *CE,
11125 IdentifierInfo *FnName) {
11126 // Don't crash if the user has the wrong number of arguments.
11127 if (CE->getNumArgs() < 3)
11128 return;
11129 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
11130 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
11131 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
11132
11133 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
11134 CE->getRParenLoc()))
11135 return;
11136
11137 // Identify common expressions that are wrongly used as the size argument
11138 // to strncat and may lead to buffer overflows.
11139 unsigned PatternType = 0;
11140 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
11141 // - sizeof(dst)
11142 if (referToTheSameDecl(SizeOfArg, DstArg))
11143 PatternType = 1;
11144 // - sizeof(src)
11145 else if (referToTheSameDecl(SizeOfArg, SrcArg))
11146 PatternType = 2;
11147 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
11148 if (BE->getOpcode() == BO_Sub) {
11149 const Expr *L = BE->getLHS()->IgnoreParenCasts();
11150 const Expr *R = BE->getRHS()->IgnoreParenCasts();
11151 // - sizeof(dst) - strlen(dst)
11152 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
11153 referToTheSameDecl(DstArg, getStrlenExprArg(R)))
11154 PatternType = 1;
11155 // - sizeof(src) - (anything)
11156 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
11157 PatternType = 2;
11158 }
11159 }
11160
11161 if (PatternType == 0)
11162 return;
11163
11164 // Generate the diagnostic.
11165 SourceLocation SL = LenArg->getBeginLoc();
11166 SourceRange SR = LenArg->getSourceRange();
11167 SourceManager &SM = getSourceManager();
11168
11169 // If the function is defined as a builtin macro, do not show macro expansion.
11170 if (SM.isMacroArgExpansion(SL)) {
11171 SL = SM.getSpellingLoc(SL);
11172 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
11173 SM.getSpellingLoc(SR.getEnd()));
11174 }
11175
11176 // Check if the destination is an array (rather than a pointer to an array).
11177 QualType DstTy = DstArg->getType(); 11178 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 11179 Context); 11180 if (!isKnownSizeArray) { 11181 if (PatternType == 1) 11182 Diag(SL, diag::warn_strncat_wrong_size) << SR; 11183 else 11184 Diag(SL, diag::warn_strncat_src_size) << SR; 11185 return; 11186 } 11187 11188 if (PatternType == 1) 11189 Diag(SL, diag::warn_strncat_large_size) << SR; 11190 else 11191 Diag(SL, diag::warn_strncat_src_size) << SR; 11192 11193 SmallString<128> sizeString; 11194 llvm::raw_svector_ostream OS(sizeString); 11195 OS << "sizeof("; 11196 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11197 OS << ") - "; 11198 OS << "strlen("; 11199 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11200 OS << ") - 1"; 11201 11202 Diag(SL, diag::note_strncat_wrong_size) 11203 << FixItHint::CreateReplacement(SR, OS.str()); 11204 } 11205 11206 namespace { 11207 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 11208 const UnaryOperator *UnaryExpr, const Decl *D) { 11209 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 11210 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 11211 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 11212 return; 11213 } 11214 } 11215 11216 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 11217 const UnaryOperator *UnaryExpr) { 11218 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 11219 const Decl *D = Lvalue->getDecl(); 11220 if (isa<DeclaratorDecl>(D)) 11221 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 11222 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 11223 } 11224 11225 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 11226 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 11227 Lvalue->getMemberDecl()); 11228 } 11229 11230 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 11231 const UnaryOperator *UnaryExpr) { 11232 const auto *Lambda = dyn_cast<LambdaExpr>( 11233 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 11234 if (!Lambda) 11235 return; 11236 11237 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 11238 << CalleeName << 2 /*object: lambda expression*/; 11239 } 11240 11241 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 11242 const DeclRefExpr *Lvalue) { 11243 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 11244 if (Var == nullptr) 11245 return; 11246 11247 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 11248 << CalleeName << 0 /*object: */ << Var; 11249 } 11250 11251 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 11252 const CastExpr *Cast) { 11253 SmallString<128> SizeString; 11254 llvm::raw_svector_ostream OS(SizeString); 11255 11256 clang::CastKind Kind = Cast->getCastKind(); 11257 if (Kind == clang::CK_BitCast && 11258 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 11259 return; 11260 if (Kind == clang::CK_IntegralToPointer && 11261 !isa<IntegerLiteral>( 11262 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 11263 return; 11264 11265 switch (Cast->getCastKind()) { 11266 case clang::CK_BitCast: 11267 case clang::CK_IntegralToPointer: 11268 case clang::CK_FunctionToPointerDecay: 11269 OS << '\''; 11270 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 11271 OS << '\''; 11272 break; 11273 default: 11274 return; 11275 } 11276 11277 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
11278 << CalleeName << 0 /*object: */ << OS.str(); 11279 } 11280 } // namespace 11281 11282 /// Alerts the user that they are attempting to free a non-malloc'd object. 11283 void Sema::CheckFreeArguments(const CallExpr *E) { 11284 const std::string CalleeName = 11285 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 11286 11287 { // Prefer something that doesn't involve a cast to make things simpler. 11288 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 11289 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 11290 switch (UnaryExpr->getOpcode()) { 11291 case UnaryOperator::Opcode::UO_AddrOf: 11292 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11293 case UnaryOperator::Opcode::UO_Plus: 11294 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11295 default: 11296 break; 11297 } 11298 11299 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11300 if (Lvalue->getType()->isArrayType()) 11301 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11302 11303 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11304 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11305 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11306 return; 11307 } 11308 11309 if (isa<BlockExpr>(Arg)) { 11310 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11311 << CalleeName << 1 /*object: block*/; 11312 return; 11313 } 11314 } 11315 // Maybe the cast was important, check after the other cases. 11316 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11317 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11318 } 11319 11320 void 11321 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11322 SourceLocation ReturnLoc, 11323 bool isObjCMethod, 11324 const AttrVec *Attrs, 11325 const FunctionDecl *FD) { 11326 // Check if the return value is null but should not be. 11327 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11328 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11329 CheckNonNullExpr(*this, RetValExp)) 11330 Diag(ReturnLoc, diag::warn_null_ret) 11331 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11332 11333 // C++11 [basic.stc.dynamic.allocation]p4: 11334 // If an allocation function declared with a non-throwing 11335 // exception-specification fails to allocate storage, it shall return 11336 // a null pointer. Any other allocation function that fails to allocate 11337 // storage shall indicate failure only by throwing an exception [...] 11338 if (FD) { 11339 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11340 if (Op == OO_New || Op == OO_Array_New) { 11341 const FunctionProtoType *Proto 11342 = FD->getType()->castAs<FunctionProtoType>(); 11343 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11344 CheckNonNullExpr(*this, RetValExp)) 11345 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11346 << FD << getLangOpts().CPlusPlus11; 11347 } 11348 } 11349 11350 // PPC MMA non-pointer types are not allowed as return type. Checking the type 11351 // here prevent the user from using a PPC MMA type as trailing return type. 11352 if (Context.getTargetInfo().getTriple().isPPC64()) 11353 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11354 } 11355 11356 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 11357 11358 /// Check for comparisons of floating point operands using != and ==. 
11359 /// Issue a warning if these are not self-comparisons, as they are not likely
11360 /// to do what the programmer intended.
11361 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
11362 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
11363 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
11364
11365 // Special case: check for x == x (which is OK).
11366 // Do not emit warnings for such cases.
11367 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
11368 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
11369 if (DRL->getDecl() == DRR->getDecl())
11370 return;
11371
11372 // Special case: check for comparisons against literals that can be exactly
11373 // represented by APFloat. In such cases, do not emit a warning. This
11374 // is a heuristic: often comparisons against such literals are used to
11375 // detect if a value in a variable has not changed. This clearly can
11376 // lead to false negatives.
11377 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
11378 if (FLL->isExact())
11379 return;
11380 } else
11381 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
11382 if (FLR->isExact())
11383 return;
11384
11385 // Check for comparisons with builtin types.
11386 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
11387 if (CL->getBuiltinCallee())
11388 return;
11389
11390 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
11391 if (CR->getBuiltinCallee())
11392 return;
11393
11394 // Emit the diagnostic.
11395 Diag(Loc, diag::warn_floatingpoint_eq)
11396 << LHS->getSourceRange() << RHS->getSourceRange();
11397 }
11398
11399 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
11400 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
11401
11402 namespace {
11403
11404 /// Structure recording the 'active' range of an integer-valued
11405 /// expression.
11406 struct IntRange {
11407 /// The number of bits active in the int. Note that this includes exactly one
11408 /// sign bit if !NonNegative.
11409 unsigned Width;
11410
11411 /// True if the int is known not to have negative values. If so, all leading
11412 /// bits before Width are known zero, otherwise they are known to be the
11413 /// same as the MSB within Width.
11414 bool NonNegative;
11415
11416 IntRange(unsigned Width, bool NonNegative)
11417 : Width(Width), NonNegative(NonNegative) {}
11418
11419 /// Number of bits excluding the sign bit.
11420 unsigned valueBits() const {
11421 return NonNegative ? Width : Width - 1;
11422 }
11423
11424 /// Returns the range of the bool type.
11425 static IntRange forBoolType() {
11426 return IntRange(1, true);
11427 }
11428
11429 /// Returns the range of an opaque value of the given integral type.
11430 static IntRange forValueOfType(ASTContext &C, QualType T) {
11431 return forValueOfCanonicalType(C,
11432 T->getCanonicalTypeInternal().getTypePtr());
11433 }
11434
11435 /// Returns the range of an opaque value of a canonical integral type.
11436 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11437 assert(T->isCanonicalUnqualified()); 11438 11439 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11440 T = VT->getElementType().getTypePtr(); 11441 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11442 T = CT->getElementType().getTypePtr(); 11443 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11444 T = AT->getValueType().getTypePtr(); 11445 11446 if (!C.getLangOpts().CPlusPlus) { 11447 // For enum types in C code, use the underlying datatype. 11448 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11449 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11450 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11451 // For enum types in C++, use the known bit width of the enumerators. 11452 EnumDecl *Enum = ET->getDecl(); 11453 // In C++11, enums can have a fixed underlying type. Use this type to 11454 // compute the range. 11455 if (Enum->isFixed()) { 11456 return IntRange(C.getIntWidth(QualType(T, 0)), 11457 !ET->isSignedIntegerOrEnumerationType()); 11458 } 11459 11460 unsigned NumPositive = Enum->getNumPositiveBits(); 11461 unsigned NumNegative = Enum->getNumNegativeBits(); 11462 11463 if (NumNegative == 0) 11464 return IntRange(NumPositive, true/*NonNegative*/); 11465 else 11466 return IntRange(std::max(NumPositive + 1, NumNegative), 11467 false/*NonNegative*/); 11468 } 11469 11470 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11471 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11472 11473 const BuiltinType *BT = cast<BuiltinType>(T); 11474 assert(BT->isInteger()); 11475 11476 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11477 } 11478 11479 /// Returns the "target" range of a canonical integral type, i.e. 11480 /// the range of values expressible in the type. 11481 /// 11482 /// This matches forValueOfCanonicalType except that enums have the 11483 /// full range of their type, not the range of their enumerators. 11484 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11485 assert(T->isCanonicalUnqualified()); 11486 11487 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11488 T = VT->getElementType().getTypePtr(); 11489 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11490 T = CT->getElementType().getTypePtr(); 11491 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11492 T = AT->getValueType().getTypePtr(); 11493 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11494 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11495 11496 if (const auto *EIT = dyn_cast<BitIntType>(T)) 11497 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11498 11499 const BuiltinType *BT = cast<BuiltinType>(T); 11500 assert(BT->isInteger()); 11501 11502 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11503 } 11504 11505 /// Returns the supremum of two ranges: i.e. their conservative merge. 11506 static IntRange join(IntRange L, IntRange R) { 11507 bool Unsigned = L.NonNegative && R.NonNegative; 11508 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11509 L.NonNegative && R.NonNegative); 11510 } 11511 11512 /// Return the range of a bitwise-AND of the two ranges. 
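/// For example, for a signed 32-bit `x`, `x & 0xff` is treated as non-negative
/// and at most 8 bits wide, since a non-negative operand bounds the result.
/// (Worked example added for illustration.)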
11513 static IntRange bit_and(IntRange L, IntRange R) { 11514 unsigned Bits = std::max(L.Width, R.Width); 11515 bool NonNegative = false; 11516 if (L.NonNegative) { 11517 Bits = std::min(Bits, L.Width); 11518 NonNegative = true; 11519 } 11520 if (R.NonNegative) { 11521 Bits = std::min(Bits, R.Width); 11522 NonNegative = true; 11523 } 11524 return IntRange(Bits, NonNegative); 11525 } 11526 11527 /// Return the range of a sum of the two ranges. 11528 static IntRange sum(IntRange L, IntRange R) { 11529 bool Unsigned = L.NonNegative && R.NonNegative; 11530 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11531 Unsigned); 11532 } 11533 11534 /// Return the range of a difference of the two ranges. 11535 static IntRange difference(IntRange L, IntRange R) { 11536 // We need a 1-bit-wider range if: 11537 // 1) LHS can be negative: least value can be reduced. 11538 // 2) RHS can be negative: greatest value can be increased. 11539 bool CanWiden = !L.NonNegative || !R.NonNegative; 11540 bool Unsigned = L.NonNegative && R.Width == 0; 11541 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11542 !Unsigned, 11543 Unsigned); 11544 } 11545 11546 /// Return the range of a product of the two ranges. 11547 static IntRange product(IntRange L, IntRange R) { 11548 // If both LHS and RHS can be negative, we can form 11549 // -2^L * -2^R = 2^(L + R) 11550 // which requires L + R + 1 value bits to represent. 11551 bool CanWiden = !L.NonNegative && !R.NonNegative; 11552 bool Unsigned = L.NonNegative && R.NonNegative; 11553 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 11554 Unsigned); 11555 } 11556 11557 /// Return the range of a remainder operation between the two ranges. 11558 static IntRange rem(IntRange L, IntRange R) { 11559 // The result of a remainder can't be larger than the result of 11560 // either side. The sign of the result is the sign of the LHS. 11561 bool Unsigned = L.NonNegative; 11562 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 11563 Unsigned); 11564 } 11565 }; 11566 11567 } // namespace 11568 11569 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 11570 unsigned MaxWidth) { 11571 if (value.isSigned() && value.isNegative()) 11572 return IntRange(value.getMinSignedBits(), false); 11573 11574 if (value.getBitWidth() > MaxWidth) 11575 value = value.trunc(MaxWidth); 11576 11577 // isNonNegative() just checks the sign bit without considering 11578 // signedness. 11579 return IntRange(value.getActiveBits(), true); 11580 } 11581 11582 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 11583 unsigned MaxWidth) { 11584 if (result.isInt()) 11585 return GetValueRange(C, result.getInt(), MaxWidth); 11586 11587 if (result.isVector()) { 11588 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 11589 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 11590 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 11591 R = IntRange::join(R, El); 11592 } 11593 return R; 11594 } 11595 11596 if (result.isComplexInt()) { 11597 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 11598 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 11599 return IntRange::join(R, I); 11600 } 11601 11602 // This can happen with lossless casts to intptr_t of "based" lvalues. 11603 // Assume it might use arbitrary bits. 
11604 // FIXME: The only reason we need to pass the type in here is to get 11605 // the sign right on this one case. It would be nice if APValue 11606 // preserved this. 11607 assert(result.isLValue() || result.isAddrLabelDiff()); 11608 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 11609 } 11610 11611 static QualType GetExprType(const Expr *E) { 11612 QualType Ty = E->getType(); 11613 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 11614 Ty = AtomicRHS->getValueType(); 11615 return Ty; 11616 } 11617 11618 /// Pseudo-evaluate the given integer expression, estimating the 11619 /// range of values it might take. 11620 /// 11621 /// \param MaxWidth The width to which the value will be truncated. 11622 /// \param Approximate If \c true, return a likely range for the result: in 11623 /// particular, assume that arithmetic on narrower types doesn't leave 11624 /// those types. If \c false, return a range including all possible 11625 /// result values. 11626 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 11627 bool InConstantContext, bool Approximate) { 11628 E = E->IgnoreParens(); 11629 11630 // Try a full evaluation first. 11631 Expr::EvalResult result; 11632 if (E->EvaluateAsRValue(result, C, InConstantContext)) 11633 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 11634 11635 // I think we only want to look through implicit casts here; if the 11636 // user has an explicit widening cast, we should treat the value as 11637 // being of the new, wider type. 11638 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 11639 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 11640 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 11641 Approximate); 11642 11643 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 11644 11645 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 11646 CE->getCastKind() == CK_BooleanToSignedIntegral; 11647 11648 // Assume that non-integer casts can span the full range of the type. 11649 if (!isIntegerCast) 11650 return OutputTypeRange; 11651 11652 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 11653 std::min(MaxWidth, OutputTypeRange.Width), 11654 InConstantContext, Approximate); 11655 11656 // Bail out if the subexpr's range is as wide as the cast type. 11657 if (SubRange.Width >= OutputTypeRange.Width) 11658 return OutputTypeRange; 11659 11660 // Otherwise, we take the smaller width, and we're non-negative if 11661 // either the output type or the subexpr is. 11662 return IntRange(SubRange.Width, 11663 SubRange.NonNegative || OutputTypeRange.NonNegative); 11664 } 11665 11666 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11667 // If we can fold the condition, just take that operand. 11668 bool CondResult; 11669 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 11670 return GetExprRange(C, 11671 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 11672 MaxWidth, InConstantContext, Approximate); 11673 11674 // Otherwise, conservatively merge. 11675 // GetExprRange requires an integer expression, but a throw expression 11676 // results in a void type. 11677 Expr *E = CO->getTrueExpr(); 11678 IntRange L = E->getType()->isVoidType() 11679 ? IntRange{0, true} 11680 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11681 E = CO->getFalseExpr(); 11682 IntRange R = E->getType()->isVoidType() 11683 ? 
IntRange{0, true} 11684 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11685 return IntRange::join(L, R); 11686 } 11687 11688 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11689 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 11690 11691 switch (BO->getOpcode()) { 11692 case BO_Cmp: 11693 llvm_unreachable("builtin <=> should have class type"); 11694 11695 // Boolean-valued operations are single-bit and positive. 11696 case BO_LAnd: 11697 case BO_LOr: 11698 case BO_LT: 11699 case BO_GT: 11700 case BO_LE: 11701 case BO_GE: 11702 case BO_EQ: 11703 case BO_NE: 11704 return IntRange::forBoolType(); 11705 11706 // The type of the assignments is the type of the LHS, so the RHS 11707 // is not necessarily the same type. 11708 case BO_MulAssign: 11709 case BO_DivAssign: 11710 case BO_RemAssign: 11711 case BO_AddAssign: 11712 case BO_SubAssign: 11713 case BO_XorAssign: 11714 case BO_OrAssign: 11715 // TODO: bitfields? 11716 return IntRange::forValueOfType(C, GetExprType(E)); 11717 11718 // Simple assignments just pass through the RHS, which will have 11719 // been coerced to the LHS type. 11720 case BO_Assign: 11721 // TODO: bitfields? 11722 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11723 Approximate); 11724 11725 // Operations with opaque sources are black-listed. 11726 case BO_PtrMemD: 11727 case BO_PtrMemI: 11728 return IntRange::forValueOfType(C, GetExprType(E)); 11729 11730 // Bitwise-and uses the *infinum* of the two source ranges. 11731 case BO_And: 11732 case BO_AndAssign: 11733 Combine = IntRange::bit_and; 11734 break; 11735 11736 // Left shift gets black-listed based on a judgement call. 11737 case BO_Shl: 11738 // ...except that we want to treat '1 << (blah)' as logically 11739 // positive. It's an important idiom. 11740 if (IntegerLiteral *I 11741 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 11742 if (I->getValue() == 1) { 11743 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 11744 return IntRange(R.Width, /*NonNegative*/ true); 11745 } 11746 } 11747 LLVM_FALLTHROUGH; 11748 11749 case BO_ShlAssign: 11750 return IntRange::forValueOfType(C, GetExprType(E)); 11751 11752 // Right shift by a constant can narrow its left argument. 11753 case BO_Shr: 11754 case BO_ShrAssign: { 11755 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 11756 Approximate); 11757 11758 // If the shift amount is a positive constant, drop the width by 11759 // that much. 11760 if (Optional<llvm::APSInt> shift = 11761 BO->getRHS()->getIntegerConstantExpr(C)) { 11762 if (shift->isNonNegative()) { 11763 unsigned zext = shift->getZExtValue(); 11764 if (zext >= L.Width) 11765 L.Width = (L.NonNegative ? 0 : 1); 11766 else 11767 L.Width -= zext; 11768 } 11769 } 11770 11771 return L; 11772 } 11773 11774 // Comma acts as its right operand. 11775 case BO_Comma: 11776 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11777 Approximate); 11778 11779 case BO_Add: 11780 if (!Approximate) 11781 Combine = IntRange::sum; 11782 break; 11783 11784 case BO_Sub: 11785 if (BO->getLHS()->getType()->isPointerType()) 11786 return IntRange::forValueOfType(C, GetExprType(E)); 11787 if (!Approximate) 11788 Combine = IntRange::difference; 11789 break; 11790 11791 case BO_Mul: 11792 if (!Approximate) 11793 Combine = IntRange::product; 11794 break; 11795 11796 // The width of a division result is mostly determined by the size 11797 // of the LHS. 11798 case BO_Div: { 11799 // Don't 'pre-truncate' the operands. 
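// (That is, evaluate both operands at the width of the division's own type;
// a constant divisor then narrows the result below, e.g. dividing a 32-bit
// value by 16 leaves at most 32 - floor(log2(16)) = 28 significant bits.)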
11800 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11801 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11802 Approximate); 11803 11804 // If the divisor is constant, use that. 11805 if (Optional<llvm::APSInt> divisor = 11806 BO->getRHS()->getIntegerConstantExpr(C)) { 11807 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11808 if (log2 >= L.Width) 11809 L.Width = (L.NonNegative ? 0 : 1); 11810 else 11811 L.Width = std::min(L.Width - log2, MaxWidth); 11812 return L; 11813 } 11814 11815 // Otherwise, just use the LHS's width. 11816 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11817 // could be -1. 11818 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11819 Approximate); 11820 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11821 } 11822 11823 case BO_Rem: 11824 Combine = IntRange::rem; 11825 break; 11826 11827 // The default behavior is okay for these. 11828 case BO_Xor: 11829 case BO_Or: 11830 break; 11831 } 11832 11833 // Combine the two ranges, but limit the result to the type in which we 11834 // performed the computation. 11835 QualType T = GetExprType(E); 11836 unsigned opWidth = C.getIntWidth(T); 11837 IntRange L = 11838 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11839 IntRange R = 11840 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11841 IntRange C = Combine(L, R); 11842 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11843 C.Width = std::min(C.Width, MaxWidth); 11844 return C; 11845 } 11846 11847 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11848 switch (UO->getOpcode()) { 11849 // Boolean-valued operations are white-listed. 11850 case UO_LNot: 11851 return IntRange::forBoolType(); 11852 11853 // Operations with opaque sources are black-listed. 11854 case UO_Deref: 11855 case UO_AddrOf: // should be impossible 11856 return IntRange::forValueOfType(C, GetExprType(E)); 11857 11858 default: 11859 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11860 Approximate); 11861 } 11862 } 11863 11864 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11865 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11866 Approximate); 11867 11868 if (const auto *BitField = E->getSourceBitField()) 11869 return IntRange(BitField->getBitWidthValue(C), 11870 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11871 11872 return IntRange::forValueOfType(C, GetExprType(E)); 11873 } 11874 11875 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11876 bool InConstantContext, bool Approximate) { 11877 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11878 Approximate); 11879 } 11880 11881 /// Checks whether the given value, which currently has the given 11882 /// source semantics, has the same value when coerced through the 11883 /// target semantics. 11884 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11885 const llvm::fltSemantics &Src, 11886 const llvm::fltSemantics &Tgt) { 11887 llvm::APFloat truncated = value; 11888 11889 bool ignored; 11890 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11891 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11892 11893 return truncated.bitwiseIsEqual(value); 11894 } 11895 11896 /// Checks whether the given value, which currently has the given 11897 /// source semantics, has the same value when coerced through the 11898 /// target semantics. 
11899 /// 11900 /// The value might be a vector of floats (or a complex number). 11901 static bool IsSameFloatAfterCast(const APValue &value, 11902 const llvm::fltSemantics &Src, 11903 const llvm::fltSemantics &Tgt) { 11904 if (value.isFloat()) 11905 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11906 11907 if (value.isVector()) { 11908 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11909 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11910 return false; 11911 return true; 11912 } 11913 11914 assert(value.isComplexFloat()); 11915 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11916 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11917 } 11918 11919 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11920 bool IsListInit = false); 11921 11922 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11923 // Suppress cases where we are comparing against an enum constant. 11924 if (const DeclRefExpr *DR = 11925 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11926 if (isa<EnumConstantDecl>(DR->getDecl())) 11927 return true; 11928 11929 // Suppress cases where the value is expanded from a macro, unless that macro 11930 // is how a language represents a boolean literal. This is the case in both C 11931 // and Objective-C. 11932 SourceLocation BeginLoc = E->getBeginLoc(); 11933 if (BeginLoc.isMacroID()) { 11934 StringRef MacroName = Lexer::getImmediateMacroName( 11935 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11936 return MacroName != "YES" && MacroName != "NO" && 11937 MacroName != "true" && MacroName != "false"; 11938 } 11939 11940 return false; 11941 } 11942 11943 static bool isKnownToHaveUnsignedValue(Expr *E) { 11944 return E->getType()->isIntegerType() && 11945 (!E->getType()->isSignedIntegerType() || 11946 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11947 } 11948 11949 namespace { 11950 /// The promoted range of values of a type. In general this has the 11951 /// following structure: 11952 /// 11953 /// |-----------| . . . |-----------| 11954 /// ^ ^ ^ ^ 11955 /// Min HoleMin HoleMax Max 11956 /// 11957 /// ... where there is only a hole if a signed type is promoted to unsigned 11958 /// (in which case Min and Max are the smallest and largest representable 11959 /// values). 11960 struct PromotedRange { 11961 // Min, or HoleMax if there is a hole. 11962 llvm::APSInt PromotedMin; 11963 // Max, or HoleMin if there is a hole. 11964 llvm::APSInt PromotedMax; 11965 11966 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11967 if (R.Width == 0) 11968 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11969 else if (R.Width >= BitWidth && !Unsigned) { 11970 // Promotion made the type *narrower*. This happens when promoting 11971 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11972 // Treat all values of 'signed int' as being in range for now. 11973 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11974 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11975 } else { 11976 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11977 .extOrTrunc(BitWidth); 11978 PromotedMin.setIsUnsigned(Unsigned); 11979 11980 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11981 .extOrTrunc(BitWidth); 11982 PromotedMax.setIsUnsigned(Unsigned); 11983 } 11984 } 11985 11986 // Determine whether this range is contiguous (has no hole). 
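// For instance, a signed char promoted to a 32-bit unsigned type covers
// [0, 127] and [UINT_MAX - 127, UINT_MAX], so its promoted range has a hole
// and is not contiguous.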
11987 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11988 11989 // Where a constant value is within the range. 11990 enum ComparisonResult { 11991 LT = 0x1, 11992 LE = 0x2, 11993 GT = 0x4, 11994 GE = 0x8, 11995 EQ = 0x10, 11996 NE = 0x20, 11997 InRangeFlag = 0x40, 11998 11999 Less = LE | LT | NE, 12000 Min = LE | InRangeFlag, 12001 InRange = InRangeFlag, 12002 Max = GE | InRangeFlag, 12003 Greater = GE | GT | NE, 12004 12005 OnlyValue = LE | GE | EQ | InRangeFlag, 12006 InHole = NE 12007 }; 12008 12009 ComparisonResult compare(const llvm::APSInt &Value) const { 12010 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 12011 Value.isUnsigned() == PromotedMin.isUnsigned()); 12012 if (!isContiguous()) { 12013 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 12014 if (Value.isMinValue()) return Min; 12015 if (Value.isMaxValue()) return Max; 12016 if (Value >= PromotedMin) return InRange; 12017 if (Value <= PromotedMax) return InRange; 12018 return InHole; 12019 } 12020 12021 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 12022 case -1: return Less; 12023 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 12024 case 1: 12025 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 12026 case -1: return InRange; 12027 case 0: return Max; 12028 case 1: return Greater; 12029 } 12030 } 12031 12032 llvm_unreachable("impossible compare result"); 12033 } 12034 12035 static llvm::Optional<StringRef> 12036 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 12037 if (Op == BO_Cmp) { 12038 ComparisonResult LTFlag = LT, GTFlag = GT; 12039 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 12040 12041 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 12042 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 12043 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 12044 return llvm::None; 12045 } 12046 12047 ComparisonResult TrueFlag, FalseFlag; 12048 if (Op == BO_EQ) { 12049 TrueFlag = EQ; 12050 FalseFlag = NE; 12051 } else if (Op == BO_NE) { 12052 TrueFlag = NE; 12053 FalseFlag = EQ; 12054 } else { 12055 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 12056 TrueFlag = LT; 12057 FalseFlag = GE; 12058 } else { 12059 TrueFlag = GT; 12060 FalseFlag = LE; 12061 } 12062 if (Op == BO_GE || Op == BO_LE) 12063 std::swap(TrueFlag, FalseFlag); 12064 } 12065 if (R & TrueFlag) 12066 return StringRef("true"); 12067 if (R & FalseFlag) 12068 return StringRef("false"); 12069 return llvm::None; 12070 } 12071 }; 12072 } 12073 12074 static bool HasEnumType(Expr *E) { 12075 // Strip off implicit integral promotions. 12076 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 12077 if (ICE->getCastKind() != CK_IntegralCast && 12078 ICE->getCastKind() != CK_NoOp) 12079 break; 12080 E = ICE->getSubExpr(); 12081 } 12082 12083 return E->getType()->isEnumeralType(); 12084 } 12085 12086 static int classifyConstantValue(Expr *Constant) { 12087 // The values of this enumeration are used in the diagnostics 12088 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 12089 enum ConstantValueKind { 12090 Miscellaneous = 0, 12091 LiteralTrue, 12092 LiteralFalse 12093 }; 12094 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 12095 return BL->getValue() ? 
ConstantValueKind::LiteralTrue 12096 : ConstantValueKind::LiteralFalse; 12097 return ConstantValueKind::Miscellaneous; 12098 } 12099 12100 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 12101 Expr *Constant, Expr *Other, 12102 const llvm::APSInt &Value, 12103 bool RhsConstant) { 12104 if (S.inTemplateInstantiation()) 12105 return false; 12106 12107 Expr *OriginalOther = Other; 12108 12109 Constant = Constant->IgnoreParenImpCasts(); 12110 Other = Other->IgnoreParenImpCasts(); 12111 12112 // Suppress warnings on tautological comparisons between values of the same 12113 // enumeration type. There are only two ways we could warn on this: 12114 // - If the constant is outside the range of representable values of 12115 // the enumeration. In such a case, we should warn about the cast 12116 // to enumeration type, not about the comparison. 12117 // - If the constant is the maximum / minimum in-range value. For an 12118 // enumeration type, such comparisons can be meaningful and useful. 12119 if (Constant->getType()->isEnumeralType() && 12120 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 12121 return false; 12122 12123 IntRange OtherValueRange = GetExprRange( 12124 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false); 12125 12126 QualType OtherT = Other->getType(); 12127 if (const auto *AT = OtherT->getAs<AtomicType>()) 12128 OtherT = AT->getValueType(); 12129 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT); 12130 12131 // Special case for ObjC BOOL on targets where it's a typedef for a signed char 12132 // (namely, macOS). FIXME: IntRange::forValueOfType should do this. 12133 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 12134 S.NSAPIObj->isObjCBOOLType(OtherT) && 12135 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 12136 12137 // Whether we're treating Other as being a bool because of the form of 12138 // expression despite it having another type (typically 'int' in C). 12139 bool OtherIsBooleanDespiteType = 12140 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 12141 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 12142 OtherTypeRange = OtherValueRange = IntRange::forBoolType(); 12143 12144 // Check if all values in the range of possible values of this expression 12145 // lead to the same comparison outcome. 12146 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(), 12147 Value.isUnsigned()); 12148 auto Cmp = OtherPromotedValueRange.compare(Value); 12149 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 12150 if (!Result) 12151 return false; 12152 12153 // Also consider the range determined by the type alone. This allows us to 12154 // classify the warning under the proper diagnostic group. 12155 bool TautologicalTypeCompare = false; 12156 { 12157 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(), 12158 Value.isUnsigned()); 12159 auto TypeCmp = OtherPromotedTypeRange.compare(Value); 12160 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp, 12161 RhsConstant)) { 12162 TautologicalTypeCompare = true; 12163 Cmp = TypeCmp; 12164 Result = TypeResult; 12165 } 12166 } 12167 12168 // Don't warn if the non-constant operand actually always evaluates to the 12169 // same value. 12170 if (!TautologicalTypeCompare && OtherValueRange.Width == 0) 12171 return false; 12172 12173 // Suppress the diagnostic for an in-range comparison if the constant comes 12174 // from a macro or enumerator.
We don't want to diagnose 12175 // 12176 // some_long_value <= INT_MAX 12177 // 12178 // when sizeof(int) == sizeof(long). 12179 bool InRange = Cmp & PromotedRange::InRangeFlag; 12180 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 12181 return false; 12182 12183 // A comparison of an unsigned bit-field against 0 is really a type problem, 12184 // even though at the type level the bit-field might promote to 'signed int'. 12185 if (Other->refersToBitField() && InRange && Value == 0 && 12186 Other->getType()->isUnsignedIntegerOrEnumerationType()) 12187 TautologicalTypeCompare = true; 12188 12189 // If this is a comparison to an enum constant, include that 12190 // constant in the diagnostic. 12191 const EnumConstantDecl *ED = nullptr; 12192 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 12193 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 12194 12195 // Should be enough for uint128 (39 decimal digits) 12196 SmallString<64> PrettySourceValue; 12197 llvm::raw_svector_ostream OS(PrettySourceValue); 12198 if (ED) { 12199 OS << '\'' << *ED << "' (" << Value << ")"; 12200 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 12201 Constant->IgnoreParenImpCasts())) { 12202 OS << (BL->getValue() ? "YES" : "NO"); 12203 } else { 12204 OS << Value; 12205 } 12206 12207 if (!TautologicalTypeCompare) { 12208 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 12209 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 12210 << E->getOpcodeStr() << OS.str() << *Result 12211 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12212 return true; 12213 } 12214 12215 if (IsObjCSignedCharBool) { 12216 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12217 S.PDiag(diag::warn_tautological_compare_objc_bool) 12218 << OS.str() << *Result); 12219 return true; 12220 } 12221 12222 // FIXME: We use a somewhat different formatting for the in-range cases and 12223 // cases involving boolean values for historical reasons. We should pick a 12224 // consistent way of presenting these diagnostics. 12225 if (!InRange || Other->isKnownToHaveBooleanValue()) { 12226 12227 S.DiagRuntimeBehavior( 12228 E->getOperatorLoc(), E, 12229 S.PDiag(!InRange ? diag::warn_out_of_range_compare 12230 : diag::warn_tautological_bool_compare) 12231 << OS.str() << classifyConstantValue(Constant) << OtherT 12232 << OtherIsBooleanDespiteType << *Result 12233 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 12234 } else { 12235 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 12236 unsigned Diag = 12237 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 12238 ? (HasEnumType(OriginalOther) 12239 ? diag::warn_unsigned_enum_always_true_comparison 12240 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 12241 : diag::warn_unsigned_always_true_comparison) 12242 : diag::warn_tautological_constant_compare; 12243 12244 S.Diag(E->getOperatorLoc(), Diag) 12245 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 12246 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 12247 } 12248 12249 return true; 12250 } 12251 12252 /// Analyze the operands of the given comparison. Implements the 12253 /// fallback case from AnalyzeComparison. 12254 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 12255 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12256 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12257 } 12258 12259 /// Implements -Wsign-compare. 
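///
/// An illustrative example (not from the original comment) of the pattern
/// this diagnoses:
/// \code
///   void f(int i, unsigned u) {
///     if (i < u) { } // warning: comparison of integers of different signs
///   }
/// \endcode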
12260 /// 12261 /// \param E the binary operator to check for warnings 12262 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 12263 // The type the comparison is being performed in. 12264 QualType T = E->getLHS()->getType(); 12265 12266 // Only analyze comparison operators where both sides have been converted to 12267 // the same type. 12268 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 12269 return AnalyzeImpConvsInComparison(S, E); 12270 12271 // Don't analyze value-dependent comparisons directly. 12272 if (E->isValueDependent()) 12273 return AnalyzeImpConvsInComparison(S, E); 12274 12275 Expr *LHS = E->getLHS(); 12276 Expr *RHS = E->getRHS(); 12277 12278 if (T->isIntegralType(S.Context)) { 12279 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 12280 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 12281 12282 // We don't care about expressions whose result is a constant. 12283 if (RHSValue && LHSValue) 12284 return AnalyzeImpConvsInComparison(S, E); 12285 12286 // We only care about expressions where just one side is literal 12287 if ((bool)RHSValue ^ (bool)LHSValue) { 12288 // Is the constant on the RHS or LHS? 12289 const bool RhsConstant = (bool)RHSValue; 12290 Expr *Const = RhsConstant ? RHS : LHS; 12291 Expr *Other = RhsConstant ? LHS : RHS; 12292 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12293 12294 // Check whether an integer constant comparison results in a value 12295 // of 'true' or 'false'. 12296 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12297 return AnalyzeImpConvsInComparison(S, E); 12298 } 12299 } 12300 12301 if (!T->hasUnsignedIntegerRepresentation()) { 12302 // We don't do anything special if this isn't an unsigned integral 12303 // comparison: we're only interested in integral comparisons, and 12304 // signed comparisons only happen in cases we don't care to warn about. 12305 return AnalyzeImpConvsInComparison(S, E); 12306 } 12307 12308 LHS = LHS->IgnoreParenImpCasts(); 12309 RHS = RHS->IgnoreParenImpCasts(); 12310 12311 if (!S.getLangOpts().CPlusPlus) { 12312 // Avoid warning about comparison of integers with different signs when 12313 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12314 // the type of `E`. 12315 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12316 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12317 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12318 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12319 } 12320 12321 // Check to see if one of the (unmodified) operands is of different 12322 // signedness. 12323 Expr *signedOperand, *unsignedOperand; 12324 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12325 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12326 "unsigned comparison between two signed integer expressions?"); 12327 signedOperand = LHS; 12328 unsignedOperand = RHS; 12329 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12330 signedOperand = RHS; 12331 unsignedOperand = LHS; 12332 } else { 12333 return AnalyzeImpConvsInComparison(S, E); 12334 } 12335 12336 // Otherwise, calculate the effective range of the signed operand. 12337 IntRange signedRange = GetExprRange( 12338 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12339 12340 // Go ahead and analyze implicit conversions in the operands. Note 12341 // that we skip the implicit conversions on both sides. 
12342 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12343 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12344 12345 // If the signed range is non-negative, -Wsign-compare won't fire. 12346 if (signedRange.NonNegative) 12347 return; 12348 12349 // For (in)equality comparisons, if the unsigned operand is a 12350 // constant which cannot collide with an overflowed signed operand, 12351 // then reinterpreting the signed operand as unsigned will not 12352 // change the result of the comparison. 12353 if (E->isEqualityOp()) { 12354 unsigned comparisonWidth = S.Context.getIntWidth(T); 12355 IntRange unsignedRange = 12356 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12357 /*Approximate*/ true); 12358 12359 // We should never be unable to prove that the unsigned operand is 12360 // non-negative. 12361 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12362 12363 if (unsignedRange.Width < comparisonWidth) 12364 return; 12365 } 12366 12367 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12368 S.PDiag(diag::warn_mixed_sign_comparison) 12369 << LHS->getType() << RHS->getType() 12370 << LHS->getSourceRange() << RHS->getSourceRange()); 12371 } 12372 12373 /// Analyzes an attempt to assign the given value to a bitfield. 12374 /// 12375 /// Returns true if there was something fishy about the attempt. 12376 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12377 SourceLocation InitLoc) { 12378 assert(Bitfield->isBitField()); 12379 if (Bitfield->isInvalidDecl()) 12380 return false; 12381 12382 // White-list bool bitfields. 12383 QualType BitfieldType = Bitfield->getType(); 12384 if (BitfieldType->isBooleanType()) 12385 return false; 12386 12387 if (BitfieldType->isEnumeralType()) { 12388 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12389 // If the underlying enum type was not explicitly specified as an unsigned 12390 // type and the enum contains only positive values, MSVC++ will cause an 12391 // inconsistency by storing this as a signed type. 12392 if (S.getLangOpts().CPlusPlus11 && 12393 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12394 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12395 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12396 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12397 << BitfieldEnumDecl; 12398 } 12399 } 12400 12401 if (Bitfield->getType()->isBooleanType()) 12402 return false; 12403 12404 // Ignore value- or type-dependent expressions. 12405 if (Bitfield->getBitWidth()->isValueDependent() || 12406 Bitfield->getBitWidth()->isTypeDependent() || 12407 Init->isValueDependent() || 12408 Init->isTypeDependent()) 12409 return false; 12410 12411 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12412 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12413 12414 Expr::EvalResult Result; 12415 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12416 Expr::SE_AllowSideEffects)) { 12417 // The RHS is not constant. If the RHS has an enum type, make sure the 12418 // bitfield is wide enough to hold all the values of the enum without 12419 // truncation. 12420 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12421 EnumDecl *ED = EnumTy->getDecl(); 12422 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12423 12424 // Enum types are implicitly signed on Windows, so check if there are any 12425 // negative enumerators to see if the enum was intended to be signed or 12426 // not.
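      // Illustrative example (not from the original comment) of the surprising
      // sign change checked for below:
      //   enum E { A, B, C, D };        // unfixed enum, no negative values
      //   struct S { int Bits : 2; } s; // signed bit-field of exactly 2 bits
      //   s.Bits = D;                   // stores -1, so (s.Bits == D) is false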
12427 bool SignedEnum = ED->getNumNegativeBits() > 0; 12428 12429 // Check for surprising sign changes when assigning enum values to a 12430 // bitfield of different signedness. If the bitfield is signed and we 12431 // have exactly the right number of bits to store this unsigned enum, 12432 // suggest changing the enum to an unsigned type. This typically happens 12433 // on Windows where unfixed enums always use an underlying type of 'int'. 12434 unsigned DiagID = 0; 12435 if (SignedEnum && !SignedBitfield) { 12436 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12437 } else if (SignedBitfield && !SignedEnum && 12438 ED->getNumPositiveBits() == FieldWidth) { 12439 DiagID = diag::warn_signed_bitfield_enum_conversion; 12440 } 12441 12442 if (DiagID) { 12443 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12444 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12445 SourceRange TypeRange = 12446 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12447 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12448 << SignedEnum << TypeRange; 12449 } 12450 12451 // Compute the required bitwidth. If the enum has negative values, we need 12452 // one more bit than the normal number of positive bits to represent the 12453 // sign bit. 12454 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12455 ED->getNumNegativeBits()) 12456 : ED->getNumPositiveBits(); 12457 12458 // Check the bitwidth. 12459 if (BitsNeeded > FieldWidth) { 12460 Expr *WidthExpr = Bitfield->getBitWidth(); 12461 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12462 << Bitfield << ED; 12463 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12464 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12465 } 12466 } 12467 12468 return false; 12469 } 12470 12471 llvm::APSInt Value = Result.Val.getInt(); 12472 12473 unsigned OriginalWidth = Value.getBitWidth(); 12474 12475 if (!Value.isSigned() || Value.isNegative()) 12476 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12477 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12478 OriginalWidth = Value.getMinSignedBits(); 12479 12480 if (OriginalWidth <= FieldWidth) 12481 return false; 12482 12483 // Compute the value which the bitfield will contain. 12484 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12485 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12486 12487 // Check whether the stored value is equal to the original value. 12488 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12489 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12490 return false; 12491 12492 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12493 // therefore don't strictly fit into a signed bitfield of width 1. 12494 if (FieldWidth == 1 && Value == 1) 12495 return false; 12496 12497 std::string PrettyValue = toString(Value, 10); 12498 std::string PrettyTrunc = toString(TruncatedValue, 10); 12499 12500 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12501 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12502 << Init->getSourceRange(); 12503 12504 return true; 12505 } 12506 12507 /// Analyze the given simple or compound assignment for warning-worthy 12508 /// operations. 12509 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12510 // Just recurse on the LHS. 
12511 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12512 12513 // We want to recurse on the RHS as normal unless we're assigning to 12514 // a bitfield. 12515 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12516 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12517 E->getOperatorLoc())) { 12518 // Recurse, ignoring any implicit conversions on the RHS. 12519 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12520 E->getOperatorLoc()); 12521 } 12522 } 12523 12524 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12525 12526 // Diagnose implicitly sequentially-consistent atomic assignment. 12527 if (E->getLHS()->getType()->isAtomicType()) 12528 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12529 } 12530 12531 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12532 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12533 SourceLocation CContext, unsigned diag, 12534 bool pruneControlFlow = false) { 12535 if (pruneControlFlow) { 12536 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12537 S.PDiag(diag) 12538 << SourceType << T << E->getSourceRange() 12539 << SourceRange(CContext)); 12540 return; 12541 } 12542 S.Diag(E->getExprLoc(), diag) 12543 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12544 } 12545 12546 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12547 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12548 SourceLocation CContext, 12549 unsigned diag, bool pruneControlFlow = false) { 12550 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12551 } 12552 12553 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12554 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12555 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12556 } 12557 12558 static void adornObjCBoolConversionDiagWithTernaryFixit( 12559 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12560 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12561 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12562 Ignored = OVE->getSourceExpr(); 12563 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12564 isa<BinaryOperator>(Ignored) || 12565 isa<CXXOperatorCallExpr>(Ignored); 12566 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12567 if (NeedsParens) 12568 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12569 << FixItHint::CreateInsertion(EndLoc, ")"); 12570 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12571 } 12572 12573 /// Diagnose an implicit cast from a floating point value to an integer value. 
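///
/// Illustrative examples (not from the original comment) of conversions
/// diagnosed here:
/// \code
///   int i = 0.5;      // literal conversion changes value from 0.5 to 0
///   char c = 1.0e100; // integral part is out of range; conversion is undefined
/// \endcode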
12574 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 12575 SourceLocation CContext) { 12576 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 12577 const bool PruneWarnings = S.inTemplateInstantiation(); 12578 12579 Expr *InnerE = E->IgnoreParenImpCasts(); 12580 // We also want to warn on, e.g., "int i = -1.234" 12581 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 12582 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 12583 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 12584 12585 const bool IsLiteral = 12586 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 12587 12588 llvm::APFloat Value(0.0); 12589 bool IsConstant = 12590 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 12591 if (!IsConstant) { 12592 if (isObjCSignedCharBool(S, T)) { 12593 return adornObjCBoolConversionDiagWithTernaryFixit( 12594 S, E, 12595 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 12596 << E->getType()); 12597 } 12598 12599 return DiagnoseImpCast(S, E, T, CContext, 12600 diag::warn_impcast_float_integer, PruneWarnings); 12601 } 12602 12603 bool isExact = false; 12604 12605 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 12606 T->hasUnsignedIntegerRepresentation()); 12607 llvm::APFloat::opStatus Result = Value.convertToInteger( 12608 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 12609 12610 // FIXME: Force the precision of the source value down so we don't print 12611 // digits which are usually useless (we don't really care here if we 12612 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 12613 // would automatically print the shortest representation, but it's a bit 12614 // tricky to implement. 12615 SmallString<16> PrettySourceValue; 12616 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 12617 precision = (precision * 59 + 195) / 196; 12618 Value.toString(PrettySourceValue, precision); 12619 12620 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 12621 return adornObjCBoolConversionDiagWithTernaryFixit( 12622 S, E, 12623 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 12624 << PrettySourceValue); 12625 } 12626 12627 if (Result == llvm::APFloat::opOK && isExact) { 12628 if (IsLiteral) return; 12629 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 12630 PruneWarnings); 12631 } 12632 12633 // Conversion of a floating-point value to a non-bool integer where the 12634 // integral part cannot be represented by the integer type is undefined. 12635 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 12636 return DiagnoseImpCast( 12637 S, E, T, CContext, 12638 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 12639 : diag::warn_impcast_float_to_integer_out_of_range, 12640 PruneWarnings); 12641 12642 unsigned DiagID = 0; 12643 if (IsLiteral) { 12644 // Warn on floating point literal to integer. 12645 DiagID = diag::warn_impcast_literal_float_to_integer; 12646 } else if (IntegerValue == 0) { 12647 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 12648 return DiagnoseImpCast(S, E, T, CContext, 12649 diag::warn_impcast_float_integer, PruneWarnings); 12650 } 12651 // Warn on non-zero to zero conversion. 
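    // e.g. (illustrative): 'int i = 1.0 / 3;' evaluates to roughly 0.33,
    // which becomes 0 here.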
12652 DiagID = diag::warn_impcast_float_to_integer_zero; 12653 } else { 12654 if (IntegerValue.isUnsigned()) { 12655 if (!IntegerValue.isMaxValue()) { 12656 return DiagnoseImpCast(S, E, T, CContext, 12657 diag::warn_impcast_float_integer, PruneWarnings); 12658 } 12659 } else { // IntegerValue.isSigned() 12660 if (!IntegerValue.isMaxSignedValue() && 12661 !IntegerValue.isMinSignedValue()) { 12662 return DiagnoseImpCast(S, E, T, CContext, 12663 diag::warn_impcast_float_integer, PruneWarnings); 12664 } 12665 } 12666 // Warn on evaluatable floating point expression to integer conversion. 12667 DiagID = diag::warn_impcast_float_to_integer; 12668 } 12669 12670 SmallString<16> PrettyTargetValue; 12671 if (IsBool) 12672 PrettyTargetValue = Value.isZero() ? "false" : "true"; 12673 else 12674 IntegerValue.toString(PrettyTargetValue); 12675 12676 if (PruneWarnings) { 12677 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12678 S.PDiag(DiagID) 12679 << E->getType() << T.getUnqualifiedType() 12680 << PrettySourceValue << PrettyTargetValue 12681 << E->getSourceRange() << SourceRange(CContext)); 12682 } else { 12683 S.Diag(E->getExprLoc(), DiagID) 12684 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 12685 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 12686 } 12687 } 12688 12689 /// Analyze the given compound assignment for the possible losing of 12690 /// floating-point precision. 12691 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 12692 assert(isa<CompoundAssignOperator>(E) && 12693 "Must be compound assignment operation"); 12694 // Recurse on the LHS and RHS in here 12695 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12696 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12697 12698 if (E->getLHS()->getType()->isAtomicType()) 12699 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 12700 12701 // Now check the outermost expression 12702 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 12703 const auto *RBT = cast<CompoundAssignOperator>(E) 12704 ->getComputationResultType() 12705 ->getAs<BuiltinType>(); 12706 12707 // The below checks assume source is floating point. 12708 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 12709 12710 // If source is floating point but target is an integer. 12711 if (ResultBT->isInteger()) 12712 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 12713 E->getExprLoc(), diag::warn_impcast_float_integer); 12714 12715 if (!ResultBT->isFloatingPoint()) 12716 return; 12717 12718 // If both source and target are floating points, warn about losing precision. 12719 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12720 QualType(ResultBT, 0), QualType(RBT, 0)); 12721 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 12722 // warn about dropping FP rank. 
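    // e.g. (illustrative): 'float f; f *= 2.0;' computes the product in
    // 'double' and then narrows the result back to 'float'.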
12723 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 12724 diag::warn_impcast_float_result_precision); 12725 } 12726 12727 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 12728 IntRange Range) { 12729 if (!Range.Width) return "0"; 12730 12731 llvm::APSInt ValueInRange = Value; 12732 ValueInRange.setIsSigned(!Range.NonNegative); 12733 ValueInRange = ValueInRange.trunc(Range.Width); 12734 return toString(ValueInRange, 10); 12735 } 12736 12737 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 12738 if (!isa<ImplicitCastExpr>(Ex)) 12739 return false; 12740 12741 Expr *InnerE = Ex->IgnoreParenImpCasts(); 12742 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 12743 const Type *Source = 12744 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 12745 if (Target->isDependentType()) 12746 return false; 12747 12748 const BuiltinType *FloatCandidateBT = 12749 dyn_cast<BuiltinType>(ToBool ? Source : Target); 12750 const Type *BoolCandidateType = ToBool ? Target : Source; 12751 12752 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 12753 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 12754 } 12755 12756 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12757 SourceLocation CC) { 12758 unsigned NumArgs = TheCall->getNumArgs(); 12759 for (unsigned i = 0; i < NumArgs; ++i) { 12760 Expr *CurrA = TheCall->getArg(i); 12761 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12762 continue; 12763 12764 bool IsSwapped = ((i > 0) && 12765 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12766 IsSwapped |= ((i < (NumArgs - 1)) && 12767 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12768 if (IsSwapped) { 12769 // Warn on this floating-point to bool conversion. 12770 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12771 CurrA->getType(), CC, 12772 diag::warn_impcast_floating_point_to_bool); 12773 } 12774 } 12775 } 12776 12777 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12778 SourceLocation CC) { 12779 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12780 E->getExprLoc())) 12781 return; 12782 12783 // Don't warn on functions which have return type nullptr_t. 12784 if (isa<CallExpr>(E)) 12785 return; 12786 12787 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12788 const Expr::NullPointerConstantKind NullKind = 12789 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12790 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12791 return; 12792 12793 // Return if target type is a safe conversion. 12794 if (T->isAnyPointerType() || T->isBlockPointerType() || 12795 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12796 return; 12797 12798 SourceLocation Loc = E->getSourceRange().getBegin(); 12799 12800 // Venture through the macro stacks to get to the source of macro arguments. 12801 // The new location is a better location than the complete location that was 12802 // passed in. 12803 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12804 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12805 12806 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
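  // e.g. (illustrative): for 'int x = NULL;' we want the warning to point at
  // the 'NULL' the user wrote, not at the '__null' it expands to.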
12807 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12808 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12809 Loc, S.SourceMgr, S.getLangOpts()); 12810 if (MacroName == "NULL") 12811 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12812 } 12813 12814 // Only warn if the null and context location are in the same macro expansion. 12815 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12816 return; 12817 12818 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12819 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12820 << FixItHint::CreateReplacement(Loc, 12821 S.getFixItZeroLiteralForType(T, Loc)); 12822 } 12823 12824 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12825 ObjCArrayLiteral *ArrayLiteral); 12826 12827 static void 12828 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12829 ObjCDictionaryLiteral *DictionaryLiteral); 12830 12831 /// Check a single element within a collection literal against the 12832 /// target element type. 12833 static void checkObjCCollectionLiteralElement(Sema &S, 12834 QualType TargetElementType, 12835 Expr *Element, 12836 unsigned ElementKind) { 12837 // Skip a bitcast to 'id' or qualified 'id'. 12838 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12839 if (ICE->getCastKind() == CK_BitCast && 12840 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12841 Element = ICE->getSubExpr(); 12842 } 12843 12844 QualType ElementType = Element->getType(); 12845 ExprResult ElementResult(Element); 12846 if (ElementType->getAs<ObjCObjectPointerType>() && 12847 S.CheckSingleAssignmentConstraints(TargetElementType, 12848 ElementResult, 12849 false, false) 12850 != Sema::Compatible) { 12851 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12852 << ElementType << ElementKind << TargetElementType 12853 << Element->getSourceRange(); 12854 } 12855 12856 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12857 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12858 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12859 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12860 } 12861 12862 /// Check an Objective-C array literal being converted to the given 12863 /// target type. 12864 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12865 ObjCArrayLiteral *ArrayLiteral) { 12866 if (!S.NSArrayDecl) 12867 return; 12868 12869 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12870 if (!TargetObjCPtr) 12871 return; 12872 12873 if (TargetObjCPtr->isUnspecialized() || 12874 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12875 != S.NSArrayDecl->getCanonicalDecl()) 12876 return; 12877 12878 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12879 if (TypeArgs.size() != 1) 12880 return; 12881 12882 QualType TargetElementType = TypeArgs[0]; 12883 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12884 checkObjCCollectionLiteralElement(S, TargetElementType, 12885 ArrayLiteral->getElement(I), 12886 0); 12887 } 12888 } 12889 12890 /// Check an Objective-C dictionary literal being converted to the given 12891 /// target type. 
12892 static void 12893 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12894 ObjCDictionaryLiteral *DictionaryLiteral) { 12895 if (!S.NSDictionaryDecl) 12896 return; 12897 12898 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12899 if (!TargetObjCPtr) 12900 return; 12901 12902 if (TargetObjCPtr->isUnspecialized() || 12903 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12904 != S.NSDictionaryDecl->getCanonicalDecl()) 12905 return; 12906 12907 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12908 if (TypeArgs.size() != 2) 12909 return; 12910 12911 QualType TargetKeyType = TypeArgs[0]; 12912 QualType TargetObjectType = TypeArgs[1]; 12913 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 12914 auto Element = DictionaryLiteral->getKeyValueElement(I); 12915 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 12916 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 12917 } 12918 } 12919 12920 // Helper function to filter out cases for constant width constant conversion. 12921 // Don't warn on char array initialization or for non-decimal values. 12922 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 12923 SourceLocation CC) { 12924 // If initializing from a constant, and the constant starts with '0', 12925 // then it is a binary, octal, or hexadecimal. Allow these constants 12926 // to fill all the bits, even if there is a sign change. 12927 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 12928 const char FirstLiteralCharacter = 12929 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 12930 if (FirstLiteralCharacter == '0') 12931 return false; 12932 } 12933 12934 // If the CC location points to a '{', and the type is char, then assume 12935 // it is an array initialization. 12936 if (CC.isValid() && T->isCharType()) { 12937 const char FirstContextCharacter = 12938 S.getSourceManager().getCharacterData(CC)[0]; 12939 if (FirstContextCharacter == '{') 12940 return false; 12941 } 12942 12943 return true; 12944 } 12945 12946 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 12947 const auto *IL = dyn_cast<IntegerLiteral>(E); 12948 if (!IL) { 12949 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 12950 if (UO->getOpcode() == UO_Minus) 12951 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 12952 } 12953 } 12954 12955 return IL; 12956 } 12957 12958 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 12959 E = E->IgnoreParenImpCasts(); 12960 SourceLocation ExprLoc = E->getExprLoc(); 12961 12962 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12963 BinaryOperator::Opcode Opc = BO->getOpcode(); 12964 Expr::EvalResult Result; 12965 // Do not diagnose unsigned shifts.
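    // Illustrative examples (not from the original comment) of shifts that are
    // diagnosed below:
    //   if (x << 4) { }  // signed 'x': shift used in a boolean context
    //   if (1 << 4) { }  // constant operands: always evaluates to true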
12966 if (Opc == BO_Shl) { 12967 const auto *LHS = getIntegerLiteral(BO->getLHS()); 12968 const auto *RHS = getIntegerLiteral(BO->getRHS()); 12969 if (LHS && LHS->getValue() == 0) 12970 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 12971 else if (!E->isValueDependent() && LHS && RHS && 12972 RHS->getValue().isNonNegative() && 12973 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 12974 S.Diag(ExprLoc, diag::warn_left_shift_always) 12975 << (Result.Val.getInt() != 0); 12976 else if (E->getType()->isSignedIntegerType()) 12977 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 12978 } 12979 } 12980 12981 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12982 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 12983 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 12984 if (!LHS || !RHS) 12985 return; 12986 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 12987 (RHS->getValue() == 0 || RHS->getValue() == 1)) 12988 // Do not diagnose common idioms. 12989 return; 12990 if (LHS->getValue() != 0 && RHS->getValue() != 0) 12991 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 12992 } 12993 } 12994 12995 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 12996 SourceLocation CC, 12997 bool *ICContext = nullptr, 12998 bool IsListInit = false) { 12999 if (E->isTypeDependent() || E->isValueDependent()) return; 13000 13001 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 13002 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 13003 if (Source == Target) return; 13004 if (Target->isDependentType()) return; 13005 13006 // If the conversion context location is invalid, don't complain. We also 13007 // don't want to emit a warning if the issue occurs from the expansion of 13008 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 13009 // delay this check as long as possible. Once we detect we are in that 13010 // scenario, we just return. 13011 if (CC.isInvalid()) 13012 return; 13013 13014 if (Source->isAtomicType()) 13015 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13016 13017 // Diagnose implicit casts to bool. 13018 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13019 if (isa<StringLiteral>(E)) 13020 // Warn on string literal to bool. Checks for string literals in logical 13021 // and expressions, for instance, assert(0 && "error here"), are 13022 // prevented by a check in AnalyzeImplicitConversions(). 13023 return DiagnoseImpCast(S, E, T, CC, 13024 diag::warn_impcast_string_literal_to_bool); 13025 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13026 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13027 // This covers the literal expressions that evaluate to Objective-C 13028 // objects. 13029 return DiagnoseImpCast(S, E, T, CC, 13030 diag::warn_impcast_objective_c_literal_to_bool); 13031 } 13032 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13033 // Warn on pointer to bool conversion that is always true. 13034 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13035 SourceRange(CC)); 13036 } 13037 } 13038 13039 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 13040 // is a typedef for signed char (macOS), then that constant value has to be 1 13041 // or 0.
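  // e.g. (illustrative): 'BOOL b = 2;' is diagnosed, because only 0 and 1
  // convert faithfully to a signed-char BOOL.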
13042 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13043 Expr::EvalResult Result; 13044 if (E->EvaluateAsInt(Result, S.getASTContext(), 13045 Expr::SE_AllowSideEffects)) { 13046 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13047 adornObjCBoolConversionDiagWithTernaryFixit( 13048 S, E, 13049 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13050 << toString(Result.Val.getInt(), 10)); 13051 } 13052 return; 13053 } 13054 } 13055 13056 // Check implicit casts from Objective-C collection literals to specialized 13057 // collection types, e.g., NSArray<NSString *> *. 13058 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13059 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13060 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13061 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13062 13063 // Strip vector types. 13064 if (isa<VectorType>(Source)) { 13065 if (Target->isVLSTBuiltinType() && 13066 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13067 QualType(Source, 0)) || 13068 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13069 QualType(Source, 0)))) 13070 return; 13071 13072 if (!isa<VectorType>(Target)) { 13073 if (S.SourceMgr.isInSystemMacro(CC)) 13074 return; 13075 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13076 } 13077 13078 // If the vector cast is cast between two vectors of the same size, it is 13079 // a bitcast, not a conversion. 13080 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13081 return; 13082 13083 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13084 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13085 } 13086 if (auto VecTy = dyn_cast<VectorType>(Target)) 13087 Target = VecTy->getElementType().getTypePtr(); 13088 13089 // Strip complex types. 13090 if (isa<ComplexType>(Source)) { 13091 if (!isa<ComplexType>(Target)) { 13092 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13093 return; 13094 13095 return DiagnoseImpCast(S, E, T, CC, 13096 S.getLangOpts().CPlusPlus 13097 ? diag::err_impcast_complex_scalar 13098 : diag::warn_impcast_complex_scalar); 13099 } 13100 13101 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13102 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13103 } 13104 13105 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13106 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13107 13108 // If the source is floating point... 13109 if (SourceBT && SourceBT->isFloatingPoint()) { 13110 // ...and the target is floating point... 13111 if (TargetBT && TargetBT->isFloatingPoint()) { 13112 // ...then warn if we're dropping FP rank. 13113 13114 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13115 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13116 if (Order > 0) { 13117 // Don't warn about float constants that are precisely 13118 // representable in the target type. 13119 Expr::EvalResult result; 13120 if (E->EvaluateAsRValue(result, S.Context)) { 13121 // Value might be a float, a float vector, or a float complex. 13122 if (IsSameFloatAfterCast(result.Val, 13123 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13124 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13125 return; 13126 } 13127 13128 if (S.SourceMgr.isInSystemMacro(CC)) 13129 return; 13130 13131 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13132 } 13133 // ... 
or possibly if we're increasing rank, too 13134 else if (Order < 0) { 13135 if (S.SourceMgr.isInSystemMacro(CC)) 13136 return; 13137 13138 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13139 } 13140 return; 13141 } 13142 13143 // If the target is integral, always warn. 13144 if (TargetBT && TargetBT->isInteger()) { 13145 if (S.SourceMgr.isInSystemMacro(CC)) 13146 return; 13147 13148 DiagnoseFloatingImpCast(S, E, T, CC); 13149 } 13150 13151 // Detect the case where a call result is converted from floating-point to 13152 // bool, and the final argument to the call is converted from bool, to 13153 // discover this typo: 13154 // 13155 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13156 // 13157 // FIXME: This is an incredibly special case; is there some more general 13158 // way to detect this class of misplaced-parentheses bug? 13159 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13160 // Check last argument of function call to see if it is an 13161 // implicit cast from a type matching the type the result 13162 // is being cast to. 13163 CallExpr *CEx = cast<CallExpr>(E); 13164 if (unsigned NumArgs = CEx->getNumArgs()) { 13165 Expr *LastA = CEx->getArg(NumArgs - 1); 13166 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13167 if (isa<ImplicitCastExpr>(LastA) && 13168 InnerE->getType()->isBooleanType()) { 13169 // Warn on this floating-point to bool conversion 13170 DiagnoseImpCast(S, E, T, CC, 13171 diag::warn_impcast_floating_point_to_bool); 13172 } 13173 } 13174 } 13175 return; 13176 } 13177 13178 // Valid casts involving fixed point types should be accounted for here. 13179 if (Source->isFixedPointType()) { 13180 if (Target->isUnsaturatedFixedPointType()) { 13181 Expr::EvalResult Result; 13182 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13183 S.isConstantEvaluated())) { 13184 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13185 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13186 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13187 if (Value > MaxVal || Value < MinVal) { 13188 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13189 S.PDiag(diag::warn_impcast_fixed_point_range) 13190 << Value.toString() << T 13191 << E->getSourceRange() 13192 << clang::SourceRange(CC)); 13193 return; 13194 } 13195 } 13196 } else if (Target->isIntegerType()) { 13197 Expr::EvalResult Result; 13198 if (!S.isConstantEvaluated() && 13199 E->EvaluateAsFixedPoint(Result, S.Context, 13200 Expr::SE_AllowSideEffects)) { 13201 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13202 13203 bool Overflowed; 13204 llvm::APSInt IntResult = FXResult.convertToInt( 13205 S.Context.getIntWidth(T), 13206 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13207 13208 if (Overflowed) { 13209 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13210 S.PDiag(diag::warn_impcast_fixed_point_range) 13211 << FXResult.toString() << T 13212 << E->getSourceRange() 13213 << clang::SourceRange(CC)); 13214 return; 13215 } 13216 } 13217 } 13218 } else if (Target->isUnsaturatedFixedPointType()) { 13219 if (Source->isIntegerType()) { 13220 Expr::EvalResult Result; 13221 if (!S.isConstantEvaluated() && 13222 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13223 llvm::APSInt Value = Result.Val.getInt(); 13224 13225 bool Overflowed; 13226 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13227 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13228 13229 if (Overflowed) { 13230
S.DiagRuntimeBehavior(E->getExprLoc(), E, 13231 S.PDiag(diag::warn_impcast_fixed_point_range) 13232 << toString(Value, /*Radix=*/10) << T 13233 << E->getSourceRange() 13234 << clang::SourceRange(CC)); 13235 return; 13236 } 13237 } 13238 } 13239 } 13240 13241 // If we are casting an integer type to a floating point type without 13242 // initialization-list syntax, we might lose accuracy if the floating 13243 // point type has a narrower significand than the integer type. 13244 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13245 TargetBT->isFloatingType() && !IsListInit) { 13246 // Determine the number of precision bits in the source integer type. 13247 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13248 /*Approximate*/ true); 13249 unsigned int SourcePrecision = SourceRange.Width; 13250 13251 // Determine the number of precision bits in the 13252 // target floating point type. 13253 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13254 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13255 13256 if (SourcePrecision > 0 && TargetPrecision > 0 && 13257 SourcePrecision > TargetPrecision) { 13258 13259 if (Optional<llvm::APSInt> SourceInt = 13260 E->getIntegerConstantExpr(S.Context)) { 13261 // If the source integer is a constant, convert it to the target 13262 // floating point type. Issue a warning if the value changes 13263 // during the whole conversion. 13264 llvm::APFloat TargetFloatValue( 13265 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13266 llvm::APFloat::opStatus ConversionStatus = 13267 TargetFloatValue.convertFromAPInt( 13268 *SourceInt, SourceBT->isSignedInteger(), 13269 llvm::APFloat::rmNearestTiesToEven); 13270 13271 if (ConversionStatus != llvm::APFloat::opOK) { 13272 SmallString<32> PrettySourceValue; 13273 SourceInt->toString(PrettySourceValue, 10); 13274 SmallString<32> PrettyTargetValue; 13275 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13276 13277 S.DiagRuntimeBehavior( 13278 E->getExprLoc(), E, 13279 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13280 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13281 << E->getSourceRange() << clang::SourceRange(CC)); 13282 } 13283 } else { 13284 // Otherwise, the implicit conversion may lose precision. 13285 DiagnoseImpCast(S, E, T, CC, 13286 diag::warn_impcast_integer_float_precision); 13287 } 13288 } 13289 } 13290 13291 DiagnoseNullConversion(S, E, T, CC); 13292 13293 S.DiscardMisalignedMemberAddress(Target, E); 13294 13295 if (Target->isBooleanType()) 13296 DiagnoseIntInBoolContext(S, E); 13297 13298 if (!Source->isIntegerType() || !Target->isIntegerType()) 13299 return; 13300 13301 // TODO: remove this early return once the false positives for constant->bool 13302 // in templates, macros, etc, are reduced or removed. 
13303 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13304 return; 13305 13306 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13307 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13308 return adornObjCBoolConversionDiagWithTernaryFixit( 13309 S, E, 13310 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13311 << E->getType()); 13312 } 13313 13314 IntRange SourceTypeRange = 13315 IntRange::forTargetOfCanonicalType(S.Context, Source); 13316 IntRange LikelySourceRange = 13317 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13318 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13319 13320 if (LikelySourceRange.Width > TargetRange.Width) { 13321 // If the source is a constant, use a default-on diagnostic. 13322 // TODO: this should happen for bitfield stores, too. 13323 Expr::EvalResult Result; 13324 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13325 S.isConstantEvaluated())) { 13326 llvm::APSInt Value(32); 13327 Value = Result.Val.getInt(); 13328 13329 if (S.SourceMgr.isInSystemMacro(CC)) 13330 return; 13331 13332 std::string PrettySourceValue = toString(Value, 10); 13333 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13334 13335 S.DiagRuntimeBehavior( 13336 E->getExprLoc(), E, 13337 S.PDiag(diag::warn_impcast_integer_precision_constant) 13338 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13339 << E->getSourceRange() << SourceRange(CC)); 13340 return; 13341 } 13342 13343 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 13344 if (S.SourceMgr.isInSystemMacro(CC)) 13345 return; 13346 13347 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13348 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13349 /* pruneControlFlow */ true); 13350 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13351 } 13352 13353 if (TargetRange.Width > SourceTypeRange.Width) { 13354 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13355 if (UO->getOpcode() == UO_Minus) 13356 if (Source->isUnsignedIntegerType()) { 13357 if (Target->isUnsignedIntegerType()) 13358 return DiagnoseImpCast(S, E, T, CC, 13359 diag::warn_impcast_high_order_zero_bits); 13360 if (Target->isSignedIntegerType()) 13361 return DiagnoseImpCast(S, E, T, CC, 13362 diag::warn_impcast_nonnegative_result); 13363 } 13364 } 13365 13366 if (TargetRange.Width == LikelySourceRange.Width && 13367 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13368 Source->isSignedIntegerType()) { 13369 // Warn when doing a signed to signed conversion, warn if the positive 13370 // source value is exactly the width of the target type, which will 13371 // cause a negative value to be stored. 13372 13373 Expr::EvalResult Result; 13374 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13375 !S.SourceMgr.isInSystemMacro(CC)) { 13376 llvm::APSInt Value = Result.Val.getInt(); 13377 if (isSameWidthConstantConversion(S, E, T, CC)) { 13378 std::string PrettySourceValue = toString(Value, 10); 13379 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13380 13381 S.DiagRuntimeBehavior( 13382 E->getExprLoc(), E, 13383 S.PDiag(diag::warn_impcast_integer_precision_constant) 13384 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13385 << E->getSourceRange() << SourceRange(CC)); 13386 return; 13387 } 13388 } 13389 13390 // Fall through for non-constants to give a sign conversion warning. 
13391 } 13392 13393 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13394 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13395 LikelySourceRange.Width == TargetRange.Width)) { 13396 if (S.SourceMgr.isInSystemMacro(CC)) 13397 return; 13398 13399 unsigned DiagID = diag::warn_impcast_integer_sign; 13400 13401 // Traditionally, gcc has warned about this under -Wsign-compare. 13402 // We also want to warn about it in -Wconversion. 13403 // So if -Wconversion is off, use a completely identical diagnostic 13404 // in the sign-compare group. 13405 // The conditional-checking code will 13406 if (ICContext) { 13407 DiagID = diag::warn_impcast_integer_sign_conditional; 13408 *ICContext = true; 13409 } 13410 13411 return DiagnoseImpCast(S, E, T, CC, DiagID); 13412 } 13413 13414 // Diagnose conversions between different enumeration types. 13415 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13416 // type, to give us better diagnostics. 13417 QualType SourceType = E->getType(); 13418 if (!S.getLangOpts().CPlusPlus) { 13419 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13420 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13421 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13422 SourceType = S.Context.getTypeDeclType(Enum); 13423 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13424 } 13425 } 13426 13427 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13428 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13429 if (SourceEnum->getDecl()->hasNameForLinkage() && 13430 TargetEnum->getDecl()->hasNameForLinkage() && 13431 SourceEnum != TargetEnum) { 13432 if (S.SourceMgr.isInSystemMacro(CC)) 13433 return; 13434 13435 return DiagnoseImpCast(S, E, SourceType, T, CC, 13436 diag::warn_impcast_different_enum_types); 13437 } 13438 } 13439 13440 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13441 SourceLocation CC, QualType T); 13442 13443 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13444 SourceLocation CC, bool &ICContext) { 13445 E = E->IgnoreParenImpCasts(); 13446 13447 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13448 return CheckConditionalOperator(S, CO, CC, T); 13449 13450 AnalyzeImplicitConversions(S, E, CC); 13451 if (E->getType() != T) 13452 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13453 } 13454 13455 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13456 SourceLocation CC, QualType T) { 13457 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13458 13459 Expr *TrueExpr = E->getTrueExpr(); 13460 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13461 TrueExpr = BCO->getCommon(); 13462 13463 bool Suspicious = false; 13464 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13465 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13466 13467 if (T->isBooleanType()) 13468 DiagnoseIntInBoolContext(S, E); 13469 13470 // If -Wconversion would have warned about either of the candidates 13471 // for a signedness conversion to the context type... 13472 if (!Suspicious) return; 13473 13474 // ...but it's currently ignored... 13475 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13476 return; 13477 13478 // ...then check whether it would have warned about either of the 13479 // candidates for a signedness conversion to the condition type. 
13480 if (E->getType() == T) return; 13481 13482 Suspicious = false; 13483 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13484 E->getType(), CC, &Suspicious); 13485 if (!Suspicious) 13486 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13487 E->getType(), CC, &Suspicious); 13488 } 13489 13490 /// Check conversion of given expression to boolean. 13491 /// Input argument E is a logical expression. 13492 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13493 if (S.getLangOpts().Bool) 13494 return; 13495 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13496 return; 13497 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13498 } 13499 13500 namespace { 13501 struct AnalyzeImplicitConversionsWorkItem { 13502 Expr *E; 13503 SourceLocation CC; 13504 bool IsListInit; 13505 }; 13506 } 13507 13508 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13509 /// that should be visited are added to WorkList. 13510 static void AnalyzeImplicitConversions( 13511 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13512 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13513 Expr *OrigE = Item.E; 13514 SourceLocation CC = Item.CC; 13515 13516 QualType T = OrigE->getType(); 13517 Expr *E = OrigE->IgnoreParenImpCasts(); 13518 13519 // Propagate whether we are in a C++ list initialization expression. 13520 // If so, we do not issue warnings for implicit int-float conversion 13521 // precision loss, because C++11 narrowing already handles it. 13522 bool IsListInit = Item.IsListInit || 13523 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13524 13525 if (E->isTypeDependent() || E->isValueDependent()) 13526 return; 13527 13528 Expr *SourceExpr = E; 13529 // Examine, but don't traverse into the source expression of an 13530 // OpaqueValueExpr, since it may have multiple parents and we don't want to 13531 // emit duplicate diagnostics. Its fine to examine the form or attempt to 13532 // evaluate it in the context of checking the specific conversion to T though. 13533 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13534 if (auto *Src = OVE->getSourceExpr()) 13535 SourceExpr = Src; 13536 13537 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 13538 if (UO->getOpcode() == UO_Not && 13539 UO->getSubExpr()->isKnownToHaveBooleanValue()) 13540 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 13541 << OrigE->getSourceRange() << T->isBooleanType() 13542 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 13543 13544 if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr)) 13545 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) && 13546 BO->getLHS()->isKnownToHaveBooleanValue() && 13547 BO->getRHS()->isKnownToHaveBooleanValue() && 13548 BO->getLHS()->HasSideEffects(S.Context) && 13549 BO->getRHS()->HasSideEffects(S.Context)) { 13550 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical) 13551 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange() 13552 << FixItHint::CreateReplacement( 13553 BO->getOperatorLoc(), 13554 (BO->getOpcode() == BO_And ? "&&" : "||")); 13555 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int); 13556 } 13557 13558 // For conditional operators, we analyze the arguments as if they 13559 // were being fed directly into the output. 
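// (e.g. in 'short s = b ? i : j;' with 'int' operands, each arm is checked
// against 'short' rather than against the conditional's own type.)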
13560 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 13561 CheckConditionalOperator(S, CO, CC, T); 13562 return; 13563 } 13564 13565 // Check implicit argument conversions for function calls. 13566 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 13567 CheckImplicitArgumentConversions(S, Call, CC); 13568 13569 // Go ahead and check any implicit conversions we might have skipped. 13570 // The non-canonical typecheck is just an optimization; 13571 // CheckImplicitConversion will filter out dead implicit conversions. 13572 if (SourceExpr->getType() != T) 13573 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 13574 13575 // Now continue drilling into this expression. 13576 13577 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 13578 // The bound subexpressions in a PseudoObjectExpr are not reachable 13579 // as transitive children. 13580 // FIXME: Use a more uniform representation for this. 13581 for (auto *SE : POE->semantics()) 13582 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 13583 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 13584 } 13585 13586 // Skip past explicit casts. 13587 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 13588 E = CE->getSubExpr()->IgnoreParenImpCasts(); 13589 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 13590 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13591 WorkList.push_back({E, CC, IsListInit}); 13592 return; 13593 } 13594 13595 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13596 // Do a somewhat different check with comparison operators. 13597 if (BO->isComparisonOp()) 13598 return AnalyzeComparison(S, BO); 13599 13600 // And with simple assignments. 13601 if (BO->getOpcode() == BO_Assign) 13602 return AnalyzeAssignment(S, BO); 13603 // And with compound assignments. 13604 if (BO->isAssignmentOp()) 13605 return AnalyzeCompoundAssignment(S, BO); 13606 } 13607 13608 // These break the otherwise-useful invariant below. Fortunately, 13609 // we don't really need to recurse into them, because any internal 13610 // expressions should have been analyzed already when they were 13611 // built into statements. 13612 if (isa<StmtExpr>(E)) return; 13613 13614 // Don't descend into unevaluated contexts. 13615 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 13616 13617 // Now just recurse over the expression's children. 13618 CC = E->getExprLoc(); 13619 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 13620 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 13621 for (Stmt *SubStmt : E->children()) { 13622 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 13623 if (!ChildExpr) 13624 continue; 13625 13626 if (IsLogicalAndOperator && 13627 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 13628 // Ignore checking string literals that are in logical and operators. 13629 // This is a common pattern for asserts. 
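// (e.g. 'assert(ptr && "ptr must not be null")'.)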
13630 continue;
13631 WorkList.push_back({ChildExpr, CC, IsListInit});
13632 }
13633
13634 if (BO && BO->isLogicalOp()) {
13635 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
13636 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
13637 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
13638
13639 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
13640 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
13641 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
13642 }
13643
13644 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
13645 if (U->getOpcode() == UO_LNot) {
13646 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
13647 } else if (U->getOpcode() != UO_AddrOf) {
13648 if (U->getSubExpr()->getType()->isAtomicType())
13649 S.Diag(U->getSubExpr()->getBeginLoc(),
13650 diag::warn_atomic_implicit_seq_cst);
13651 }
13652 }
13653 }
13654
13655 /// AnalyzeImplicitConversions - Find and report any interesting
13656 /// implicit conversions in the given expression. There are a couple
13657 /// of competing diagnostics here, -Wconversion and -Wsign-compare.
13658 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
13659 bool IsListInit/*= false*/) {
13660 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
13661 WorkList.push_back({OrigE, CC, IsListInit});
13662 while (!WorkList.empty())
13663 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
13664 }
13665
13666 /// Diagnose integer type and any valid implicit conversion to it.
13667 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
13668 // Taking into account implicit conversions,
13669 // allow any integer.
13670 if (!E->getType()->isIntegerType()) {
13671 S.Diag(E->getBeginLoc(),
13672 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
13673 return true;
13674 }
13675 // Potentially emit standard warnings for implicit conversions if enabled
13676 // using -Wconversion.
13677 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
13678 return false;
13679 }
13680
13681 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
13682 // Returns true when emitting a warning about taking the address of a reference.
13683 static bool CheckForReference(Sema &SemaRef, const Expr *E,
13684 const PartialDiagnostic &PD) {
13685 E = E->IgnoreParenImpCasts();
13686
13687 const FunctionDecl *FD = nullptr;
13688
13689 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
13690 if (!DRE->getDecl()->getType()->isReferenceType())
13691 return false;
13692 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
13693 if (!M->getMemberDecl()->getType()->isReferenceType())
13694 return false;
13695 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
13696 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
13697 return false;
13698 FD = Call->getDirectCallee();
13699 } else {
13700 return false;
13701 }
13702
13703 SemaRef.Diag(E->getExprLoc(), PD);
13704
13705 // If possible, point to location of function.
13706 if (FD) {
13707 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
13708 }
13709
13710 return true;
13711 }
13712
13713 // Returns true if the SourceLocation is expanded from any macro body.
13714 // Returns false if the SourceLocation is invalid, is not in a macro
13715 // expansion, or is expanded from a top-level macro argument.
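// For example, given '#define NOT_NULL(p) ((p) != nullptr)', a location spelled
// inside the macro body counts as a body expansion, while the location of the
// 'p' argument at the call site is only an argument expansion and yields false.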
13716 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 13717 if (Loc.isInvalid()) 13718 return false; 13719 13720 while (Loc.isMacroID()) { 13721 if (SM.isMacroBodyExpansion(Loc)) 13722 return true; 13723 Loc = SM.getImmediateMacroCallerLoc(Loc); 13724 } 13725 13726 return false; 13727 } 13728 13729 /// Diagnose pointers that are always non-null. 13730 /// \param E the expression containing the pointer 13731 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 13732 /// compared to a null pointer 13733 /// \param IsEqual True when the comparison is equal to a null pointer 13734 /// \param Range Extra SourceRange to highlight in the diagnostic 13735 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 13736 Expr::NullPointerConstantKind NullKind, 13737 bool IsEqual, SourceRange Range) { 13738 if (!E) 13739 return; 13740 13741 // Don't warn inside macros. 13742 if (E->getExprLoc().isMacroID()) { 13743 const SourceManager &SM = getSourceManager(); 13744 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 13745 IsInAnyMacroBody(SM, Range.getBegin())) 13746 return; 13747 } 13748 E = E->IgnoreImpCasts(); 13749 13750 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 13751 13752 if (isa<CXXThisExpr>(E)) { 13753 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 13754 : diag::warn_this_bool_conversion; 13755 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 13756 return; 13757 } 13758 13759 bool IsAddressOf = false; 13760 13761 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13762 if (UO->getOpcode() != UO_AddrOf) 13763 return; 13764 IsAddressOf = true; 13765 E = UO->getSubExpr(); 13766 } 13767 13768 if (IsAddressOf) { 13769 unsigned DiagID = IsCompare 13770 ? diag::warn_address_of_reference_null_compare 13771 : diag::warn_address_of_reference_bool_conversion; 13772 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13773 << IsEqual; 13774 if (CheckForReference(*this, E, PD)) { 13775 return; 13776 } 13777 } 13778 13779 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13780 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13781 std::string Str; 13782 llvm::raw_string_ostream S(Str); 13783 E->printPretty(S, nullptr, getPrintingPolicy()); 13784 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 13785 : diag::warn_cast_nonnull_to_bool; 13786 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13787 << E->getSourceRange() << Range << IsEqual; 13788 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13789 }; 13790 13791 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13792 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13793 if (auto *Callee = Call->getDirectCallee()) { 13794 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13795 ComplainAboutNonnullParamOrCall(A); 13796 return; 13797 } 13798 } 13799 } 13800 13801 // Expect to find a single Decl. Skip anything more complicated. 13802 ValueDecl *D = nullptr; 13803 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13804 D = R->getDecl(); 13805 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13806 D = M->getMemberDecl(); 13807 } 13808 13809 // Weak Decls can be null. 
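// (e.g. a symbol declared with __attribute__((weak)) may resolve to null, so
// comparing its address against null is meaningful and should not be flagged.)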
13810 if (!D || D->isWeak()) 13811 return; 13812 13813 // Check for parameter decl with nonnull attribute 13814 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 13815 if (getCurFunction() && 13816 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 13817 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 13818 ComplainAboutNonnullParamOrCall(A); 13819 return; 13820 } 13821 13822 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 13823 // Skip function template not specialized yet. 13824 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 13825 return; 13826 auto ParamIter = llvm::find(FD->parameters(), PV); 13827 assert(ParamIter != FD->param_end()); 13828 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 13829 13830 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 13831 if (!NonNull->args_size()) { 13832 ComplainAboutNonnullParamOrCall(NonNull); 13833 return; 13834 } 13835 13836 for (const ParamIdx &ArgNo : NonNull->args()) { 13837 if (ArgNo.getASTIndex() == ParamNo) { 13838 ComplainAboutNonnullParamOrCall(NonNull); 13839 return; 13840 } 13841 } 13842 } 13843 } 13844 } 13845 } 13846 13847 QualType T = D->getType(); 13848 const bool IsArray = T->isArrayType(); 13849 const bool IsFunction = T->isFunctionType(); 13850 13851 // Address of function is used to silence the function warning. 13852 if (IsAddressOf && IsFunction) { 13853 return; 13854 } 13855 13856 // Found nothing. 13857 if (!IsAddressOf && !IsFunction && !IsArray) 13858 return; 13859 13860 // Pretty print the expression for the diagnostic. 13861 std::string Str; 13862 llvm::raw_string_ostream S(Str); 13863 E->printPretty(S, nullptr, getPrintingPolicy()); 13864 13865 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 13866 : diag::warn_impcast_pointer_to_bool; 13867 enum { 13868 AddressOf, 13869 FunctionPointer, 13870 ArrayPointer 13871 } DiagType; 13872 if (IsAddressOf) 13873 DiagType = AddressOf; 13874 else if (IsFunction) 13875 DiagType = FunctionPointer; 13876 else if (IsArray) 13877 DiagType = ArrayPointer; 13878 else 13879 llvm_unreachable("Could not determine diagnostic."); 13880 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 13881 << Range << IsEqual; 13882 13883 if (!IsFunction) 13884 return; 13885 13886 // Suggest '&' to silence the function warning. 13887 Diag(E->getExprLoc(), diag::note_function_warning_silence) 13888 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 13889 13890 // Check to see if '()' fixit should be emitted. 13891 QualType ReturnType; 13892 UnresolvedSet<4> NonTemplateOverloads; 13893 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 13894 if (ReturnType.isNull()) 13895 return; 13896 13897 if (IsCompare) { 13898 // There are two cases here. If there is null constant, the only suggest 13899 // for a pointer return type. If the null is 0, then suggest if the return 13900 // type is a pointer or an integer type. 13901 if (!ReturnType->isPointerType()) { 13902 if (NullKind == Expr::NPCK_ZeroExpression || 13903 NullKind == Expr::NPCK_ZeroLiteral) { 13904 if (!ReturnType->isIntegerType()) 13905 return; 13906 } else { 13907 return; 13908 } 13909 } 13910 } else { // !IsCompare 13911 // For function to bool, only suggest if the function pointer has bool 13912 // return type. 
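// (e.g. suggest appending '()' to 'if (isEnabled)' only when 'isEnabled' is
// declared to return bool.)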
13913 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
13914 return;
13915 }
13916 Diag(E->getExprLoc(), diag::note_function_to_function_call)
13917 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
13918 }
13919
13920 /// Diagnoses "dangerous" implicit conversions within the given
13921 /// expression (which is a full expression). Implements -Wconversion
13922 /// and -Wsign-compare.
13923 ///
13924 /// \param CC the "context" location of the implicit conversion, i.e.
13925 /// the location of the syntactic entity requiring the implicit
13926 /// conversion
13927 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
13928 // Don't diagnose in unevaluated contexts.
13929 if (isUnevaluatedContext())
13930 return;
13931
13932 // Don't diagnose for value- or type-dependent expressions.
13933 if (E->isTypeDependent() || E->isValueDependent())
13934 return;
13935
13936 // Check for array bounds violations in cases where the check isn't triggered
13937 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
13938 // ArraySubscriptExpr is on the RHS of a variable initialization.
13939 CheckArrayAccess(E);
13940
13941 // This is not the right CC for (e.g.) a variable initialization.
13942 AnalyzeImplicitConversions(*this, E, CC);
13943 }
13944
13945 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
13946 /// Input argument E is a logical expression.
13947 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
13948 ::CheckBoolLikeConversion(*this, E, CC);
13949 }
13950
13951 /// Diagnose when an expression is an integer constant expression whose
13952 /// evaluation results in integer overflow.
13953 void Sema::CheckForIntOverflow (Expr *E) {
13954 // Use a work list to deal with nested struct initializers.
13955 SmallVector<Expr *, 2> Exprs(1, E);
13956
13957 do {
13958 Expr *OriginalE = Exprs.pop_back_val();
13959 Expr *E = OriginalE->IgnoreParenCasts();
13960
13961 if (isa<BinaryOperator>(E)) {
13962 E->EvaluateForOverflow(Context);
13963 continue;
13964 }
13965
13966 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
13967 Exprs.append(InitList->inits().begin(), InitList->inits().end());
13968 else if (isa<ObjCBoxedExpr>(OriginalE))
13969 E->EvaluateForOverflow(Context);
13970 else if (auto Call = dyn_cast<CallExpr>(E))
13971 Exprs.append(Call->arg_begin(), Call->arg_end());
13972 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
13973 Exprs.append(Message->arg_begin(), Message->arg_end());
13974 } while (!Exprs.empty());
13975 }
13976
13977 namespace {
13978
13979 /// Visitor for expressions which looks for unsequenced operations on the
13980 /// same object.
13981 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
13982 using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
13983
13984 /// A tree of sequenced regions within an expression. Two regions are
13985 /// unsequenced if one is an ancestor or a descendant of the other. When we
13986 /// finish processing an expression with sequencing, such as a comma
13987 /// expression, we fold its tree nodes into its parent, since they are
13988 /// unsequenced with respect to nodes we will visit later.
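/// For example, in '(a, b) + c' the comma sequences 'a' with respect to 'b',
/// but once the comma's regions are folded into the parent, both remain
/// unsequenced with respect to 'c'.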
13989 class SequenceTree { 13990 struct Value { 13991 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 13992 unsigned Parent : 31; 13993 unsigned Merged : 1; 13994 }; 13995 SmallVector<Value, 8> Values; 13996 13997 public: 13998 /// A region within an expression which may be sequenced with respect 13999 /// to some other region. 14000 class Seq { 14001 friend class SequenceTree; 14002 14003 unsigned Index; 14004 14005 explicit Seq(unsigned N) : Index(N) {} 14006 14007 public: 14008 Seq() : Index(0) {} 14009 }; 14010 14011 SequenceTree() { Values.push_back(Value(0)); } 14012 Seq root() const { return Seq(0); } 14013 14014 /// Create a new sequence of operations, which is an unsequenced 14015 /// subset of \p Parent. This sequence of operations is sequenced with 14016 /// respect to other children of \p Parent. 14017 Seq allocate(Seq Parent) { 14018 Values.push_back(Value(Parent.Index)); 14019 return Seq(Values.size() - 1); 14020 } 14021 14022 /// Merge a sequence of operations into its parent. 14023 void merge(Seq S) { 14024 Values[S.Index].Merged = true; 14025 } 14026 14027 /// Determine whether two operations are unsequenced. This operation 14028 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14029 /// should have been merged into its parent as appropriate. 14030 bool isUnsequenced(Seq Cur, Seq Old) { 14031 unsigned C = representative(Cur.Index); 14032 unsigned Target = representative(Old.Index); 14033 while (C >= Target) { 14034 if (C == Target) 14035 return true; 14036 C = Values[C].Parent; 14037 } 14038 return false; 14039 } 14040 14041 private: 14042 /// Pick a representative for a sequence. 14043 unsigned representative(unsigned K) { 14044 if (Values[K].Merged) 14045 // Perform path compression as we go. 14046 return Values[K].Parent = representative(Values[K].Parent); 14047 return K; 14048 } 14049 }; 14050 14051 /// An object for which we can track unsequenced uses. 14052 using Object = const NamedDecl *; 14053 14054 /// Different flavors of object usage which we track. We only track the 14055 /// least-sequenced usage of each kind. 14056 enum UsageKind { 14057 /// A read of an object. Multiple unsequenced reads are OK. 14058 UK_Use, 14059 14060 /// A modification of an object which is sequenced before the value 14061 /// computation of the expression, such as ++n in C++. 14062 UK_ModAsValue, 14063 14064 /// A modification of an object which is not sequenced before the value 14065 /// computation of the expression, such as n++. 14066 UK_ModAsSideEffect, 14067 14068 UK_Count = UK_ModAsSideEffect + 1 14069 }; 14070 14071 /// Bundle together a sequencing region and the expression corresponding 14072 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14073 struct Usage { 14074 const Expr *UsageExpr; 14075 SequenceTree::Seq Seq; 14076 14077 Usage() : UsageExpr(nullptr) {} 14078 }; 14079 14080 struct UsageInfo { 14081 Usage Uses[UK_Count]; 14082 14083 /// Have we issued a diagnostic for this object already? 14084 bool Diagnosed; 14085 14086 UsageInfo() : Diagnosed(false) {} 14087 }; 14088 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14089 14090 Sema &SemaRef; 14091 14092 /// Sequenced regions within the expression. 14093 SequenceTree Tree; 14094 14095 /// Declaration modifications and references which we have seen. 14096 UsageInfoMap UsageMap; 14097 14098 /// The region we are currently within. 
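/// (A freshly allocated child region becomes the current region while its
/// subexpression is being visited; see VisitSequencedExpressions.)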
14099 SequenceTree::Seq Region; 14100 14101 /// Filled in with declarations which were modified as a side-effect 14102 /// (that is, post-increment operations). 14103 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14104 14105 /// Expressions to check later. We defer checking these to reduce 14106 /// stack usage. 14107 SmallVectorImpl<const Expr *> &WorkList; 14108 14109 /// RAII object wrapping the visitation of a sequenced subexpression of an 14110 /// expression. At the end of this process, the side-effects of the evaluation 14111 /// become sequenced with respect to the value computation of the result, so 14112 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14113 /// UK_ModAsValue. 14114 struct SequencedSubexpression { 14115 SequencedSubexpression(SequenceChecker &Self) 14116 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 14117 Self.ModAsSideEffect = &ModAsSideEffect; 14118 } 14119 14120 ~SequencedSubexpression() { 14121 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 14122 // Add a new usage with usage kind UK_ModAsValue, and then restore 14123 // the previous usage with UK_ModAsSideEffect (thus clearing it if 14124 // the previous one was empty). 14125 UsageInfo &UI = Self.UsageMap[M.first]; 14126 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 14127 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 14128 SideEffectUsage = M.second; 14129 } 14130 Self.ModAsSideEffect = OldModAsSideEffect; 14131 } 14132 14133 SequenceChecker &Self; 14134 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 14135 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 14136 }; 14137 14138 /// RAII object wrapping the visitation of a subexpression which we might 14139 /// choose to evaluate as a constant. If any subexpression is evaluated and 14140 /// found to be non-constant, this allows us to suppress the evaluation of 14141 /// the outer expression. 14142 class EvaluationTracker { 14143 public: 14144 EvaluationTracker(SequenceChecker &Self) 14145 : Self(Self), Prev(Self.EvalTracker) { 14146 Self.EvalTracker = this; 14147 } 14148 14149 ~EvaluationTracker() { 14150 Self.EvalTracker = Prev; 14151 if (Prev) 14152 Prev->EvalOK &= EvalOK; 14153 } 14154 14155 bool evaluate(const Expr *E, bool &Result) { 14156 if (!EvalOK || E->isValueDependent()) 14157 return false; 14158 EvalOK = E->EvaluateAsBooleanCondition( 14159 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 14160 return EvalOK; 14161 } 14162 14163 private: 14164 SequenceChecker &Self; 14165 EvaluationTracker *Prev; 14166 bool EvalOK = true; 14167 } *EvalTracker = nullptr; 14168 14169 /// Find the object which is produced by the specified expression, 14170 /// if any. 14171 Object getObject(const Expr *E, bool Mod) const { 14172 E = E->IgnoreParenCasts(); 14173 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 14174 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 14175 return getObject(UO->getSubExpr(), Mod); 14176 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 14177 if (BO->getOpcode() == BO_Comma) 14178 return getObject(BO->getRHS(), Mod); 14179 if (Mod && BO->isAssignmentOp()) 14180 return getObject(BO->getLHS(), Mod); 14181 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 14182 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 
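// For now, only members accessed through 'this' are modeled as objects.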
14183 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 14184 return ME->getMemberDecl(); 14185 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 14186 // FIXME: If this is a reference, map through to its value. 14187 return DRE->getDecl(); 14188 return nullptr; 14189 } 14190 14191 /// Note that an object \p O was modified or used by an expression 14192 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 14193 /// the object \p O as obtained via the \p UsageMap. 14194 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 14195 // Get the old usage for the given object and usage kind. 14196 Usage &U = UI.Uses[UK]; 14197 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) { 14198 // If we have a modification as side effect and are in a sequenced 14199 // subexpression, save the old Usage so that we can restore it later 14200 // in SequencedSubexpression::~SequencedSubexpression. 14201 if (UK == UK_ModAsSideEffect && ModAsSideEffect) 14202 ModAsSideEffect->push_back(std::make_pair(O, U)); 14203 // Then record the new usage with the current sequencing region. 14204 U.UsageExpr = UsageExpr; 14205 U.Seq = Region; 14206 } 14207 } 14208 14209 /// Check whether a modification or use of an object \p O in an expression 14210 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is 14211 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap. 14212 /// \p IsModMod is true when we are checking for a mod-mod unsequenced 14213 /// usage and false we are checking for a mod-use unsequenced usage. 14214 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, 14215 UsageKind OtherKind, bool IsModMod) { 14216 if (UI.Diagnosed) 14217 return; 14218 14219 const Usage &U = UI.Uses[OtherKind]; 14220 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) 14221 return; 14222 14223 const Expr *Mod = U.UsageExpr; 14224 const Expr *ModOrUse = UsageExpr; 14225 if (OtherKind == UK_Use) 14226 std::swap(Mod, ModOrUse); 14227 14228 SemaRef.DiagRuntimeBehavior( 14229 Mod->getExprLoc(), {Mod, ModOrUse}, 14230 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod 14231 : diag::warn_unsequenced_mod_use) 14232 << O << SourceRange(ModOrUse->getExprLoc())); 14233 UI.Diagnosed = true; 14234 } 14235 14236 // A note on note{Pre, Post}{Use, Mod}: 14237 // 14238 // (It helps to follow the algorithm with an expression such as 14239 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced 14240 // operations before C++17 and both are well-defined in C++17). 14241 // 14242 // When visiting a node which uses/modify an object we first call notePreUse 14243 // or notePreMod before visiting its sub-expression(s). At this point the 14244 // children of the current node have not yet been visited and so the eventual 14245 // uses/modifications resulting from the children of the current node have not 14246 // been recorded yet. 14247 // 14248 // We then visit the children of the current node. After that notePostUse or 14249 // notePostMod is called. These will 1) detect an unsequenced modification 14250 // as side effect (as in "k++ + k") and 2) add a new usage with the 14251 // appropriate usage kind. 14252 // 14253 // We also have to be careful that some operation sequences modification as 14254 // side effect as well (for example: || or ,). To account for this we wrap 14255 // the visitation of such a sub-expression (for example: the LHS of || or ,) 14256 // with SequencedSubexpression. 
SequencedSubexpression is an RAII object 14257 // which record usages which are modifications as side effect, and then 14258 // downgrade them (or more accurately restore the previous usage which was a 14259 // modification as side effect) when exiting the scope of the sequenced 14260 // subexpression. 14261 14262 void notePreUse(Object O, const Expr *UseExpr) { 14263 UsageInfo &UI = UsageMap[O]; 14264 // Uses conflict with other modifications. 14265 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 14266 } 14267 14268 void notePostUse(Object O, const Expr *UseExpr) { 14269 UsageInfo &UI = UsageMap[O]; 14270 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 14271 /*IsModMod=*/false); 14272 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 14273 } 14274 14275 void notePreMod(Object O, const Expr *ModExpr) { 14276 UsageInfo &UI = UsageMap[O]; 14277 // Modifications conflict with other modifications and with uses. 14278 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 14279 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 14280 } 14281 14282 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 14283 UsageInfo &UI = UsageMap[O]; 14284 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 14285 /*IsModMod=*/true); 14286 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 14287 } 14288 14289 public: 14290 SequenceChecker(Sema &S, const Expr *E, 14291 SmallVectorImpl<const Expr *> &WorkList) 14292 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 14293 Visit(E); 14294 // Silence a -Wunused-private-field since WorkList is now unused. 14295 // TODO: Evaluate if it can be used, and if not remove it. 14296 (void)this->WorkList; 14297 } 14298 14299 void VisitStmt(const Stmt *S) { 14300 // Skip all statements which aren't expressions for now. 14301 } 14302 14303 void VisitExpr(const Expr *E) { 14304 // By default, just recurse to evaluated subexpressions. 14305 Base::VisitStmt(E); 14306 } 14307 14308 void VisitCastExpr(const CastExpr *E) { 14309 Object O = Object(); 14310 if (E->getCastKind() == CK_LValueToRValue) 14311 O = getObject(E->getSubExpr(), false); 14312 14313 if (O) 14314 notePreUse(O, E); 14315 VisitExpr(E); 14316 if (O) 14317 notePostUse(O, E); 14318 } 14319 14320 void VisitSequencedExpressions(const Expr *SequencedBefore, 14321 const Expr *SequencedAfter) { 14322 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14323 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14324 SequenceTree::Seq OldRegion = Region; 14325 14326 { 14327 SequencedSubexpression SeqBefore(*this); 14328 Region = BeforeRegion; 14329 Visit(SequencedBefore); 14330 } 14331 14332 Region = AfterRegion; 14333 Visit(SequencedAfter); 14334 14335 Region = OldRegion; 14336 14337 Tree.merge(BeforeRegion); 14338 Tree.merge(AfterRegion); 14339 } 14340 14341 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14342 // C++17 [expr.sub]p1: 14343 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14344 // expression E1 is sequenced before the expression E2. 
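// (e.g. 'a[i][i++]' is diagnosed as unsequenced before C++17 but not in C++17.)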
14345 if (SemaRef.getLangOpts().CPlusPlus17) 14346 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14347 else { 14348 Visit(ASE->getLHS()); 14349 Visit(ASE->getRHS()); 14350 } 14351 } 14352 14353 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14354 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14355 void VisitBinPtrMem(const BinaryOperator *BO) { 14356 // C++17 [expr.mptr.oper]p4: 14357 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14358 // the expression E1 is sequenced before the expression E2. 14359 if (SemaRef.getLangOpts().CPlusPlus17) 14360 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14361 else { 14362 Visit(BO->getLHS()); 14363 Visit(BO->getRHS()); 14364 } 14365 } 14366 14367 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14368 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14369 void VisitBinShlShr(const BinaryOperator *BO) { 14370 // C++17 [expr.shift]p4: 14371 // The expression E1 is sequenced before the expression E2. 14372 if (SemaRef.getLangOpts().CPlusPlus17) 14373 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14374 else { 14375 Visit(BO->getLHS()); 14376 Visit(BO->getRHS()); 14377 } 14378 } 14379 14380 void VisitBinComma(const BinaryOperator *BO) { 14381 // C++11 [expr.comma]p1: 14382 // Every value computation and side effect associated with the left 14383 // expression is sequenced before every value computation and side 14384 // effect associated with the right expression. 14385 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14386 } 14387 14388 void VisitBinAssign(const BinaryOperator *BO) { 14389 SequenceTree::Seq RHSRegion; 14390 SequenceTree::Seq LHSRegion; 14391 if (SemaRef.getLangOpts().CPlusPlus17) { 14392 RHSRegion = Tree.allocate(Region); 14393 LHSRegion = Tree.allocate(Region); 14394 } else { 14395 RHSRegion = Region; 14396 LHSRegion = Region; 14397 } 14398 SequenceTree::Seq OldRegion = Region; 14399 14400 // C++11 [expr.ass]p1: 14401 // [...] the assignment is sequenced after the value computation 14402 // of the right and left operands, [...] 14403 // 14404 // so check it before inspecting the operands and update the 14405 // map afterwards. 14406 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14407 if (O) 14408 notePreMod(O, BO); 14409 14410 if (SemaRef.getLangOpts().CPlusPlus17) { 14411 // C++17 [expr.ass]p1: 14412 // [...] The right operand is sequenced before the left operand. [...] 14413 { 14414 SequencedSubexpression SeqBefore(*this); 14415 Region = RHSRegion; 14416 Visit(BO->getRHS()); 14417 } 14418 14419 Region = LHSRegion; 14420 Visit(BO->getLHS()); 14421 14422 if (O && isa<CompoundAssignOperator>(BO)) 14423 notePostUse(O, BO); 14424 14425 } else { 14426 // C++11 does not specify any sequencing between the LHS and RHS. 14427 Region = LHSRegion; 14428 Visit(BO->getLHS()); 14429 14430 if (O && isa<CompoundAssignOperator>(BO)) 14431 notePostUse(O, BO); 14432 14433 Region = RHSRegion; 14434 Visit(BO->getRHS()); 14435 } 14436 14437 // C++11 [expr.ass]p1: 14438 // the assignment is sequenced [...] before the value computation of the 14439 // assignment expression. 14440 // C11 6.5.16/3 has no such rule. 14441 Region = OldRegion; 14442 if (O) 14443 notePostMod(O, BO, 14444 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14445 : UK_ModAsSideEffect); 14446 if (SemaRef.getLangOpts().CPlusPlus17) { 14447 Tree.merge(RHSRegion); 14448 Tree.merge(LHSRegion); 14449 } 14450 } 14451 14452 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14453 VisitBinAssign(CAO); 14454 } 14455 14456 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14457 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14458 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14459 Object O = getObject(UO->getSubExpr(), true); 14460 if (!O) 14461 return VisitExpr(UO); 14462 14463 notePreMod(O, UO); 14464 Visit(UO->getSubExpr()); 14465 // C++11 [expr.pre.incr]p1: 14466 // the expression ++x is equivalent to x+=1 14467 notePostMod(O, UO, 14468 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14469 : UK_ModAsSideEffect); 14470 } 14471 14472 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14473 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14474 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14475 Object O = getObject(UO->getSubExpr(), true); 14476 if (!O) 14477 return VisitExpr(UO); 14478 14479 notePreMod(O, UO); 14480 Visit(UO->getSubExpr()); 14481 notePostMod(O, UO, UK_ModAsSideEffect); 14482 } 14483 14484 void VisitBinLOr(const BinaryOperator *BO) { 14485 // C++11 [expr.log.or]p2: 14486 // If the second expression is evaluated, every value computation and 14487 // side effect associated with the first expression is sequenced before 14488 // every value computation and side effect associated with the 14489 // second expression. 14490 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14491 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14492 SequenceTree::Seq OldRegion = Region; 14493 14494 EvaluationTracker Eval(*this); 14495 { 14496 SequencedSubexpression Sequenced(*this); 14497 Region = LHSRegion; 14498 Visit(BO->getLHS()); 14499 } 14500 14501 // C++11 [expr.log.or]p1: 14502 // [...] the second operand is not evaluated if the first operand 14503 // evaluates to true. 14504 bool EvalResult = false; 14505 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14506 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14507 if (ShouldVisitRHS) { 14508 Region = RHSRegion; 14509 Visit(BO->getRHS()); 14510 } 14511 14512 Region = OldRegion; 14513 Tree.merge(LHSRegion); 14514 Tree.merge(RHSRegion); 14515 } 14516 14517 void VisitBinLAnd(const BinaryOperator *BO) { 14518 // C++11 [expr.log.and]p2: 14519 // If the second expression is evaluated, every value computation and 14520 // side effect associated with the first expression is sequenced before 14521 // every value computation and side effect associated with the 14522 // second expression. 14523 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14524 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14525 SequenceTree::Seq OldRegion = Region; 14526 14527 EvaluationTracker Eval(*this); 14528 { 14529 SequencedSubexpression Sequenced(*this); 14530 Region = LHSRegion; 14531 Visit(BO->getLHS()); 14532 } 14533 14534 // C++11 [expr.log.and]p1: 14535 // [...] the second operand is not evaluated if the first operand is false. 
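// If the LHS is provably false, the RHS is never evaluated, so don't look for
// unsequenced operations inside it.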
14536 bool EvalResult = false;
14537 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14538 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
14539 if (ShouldVisitRHS) {
14540 Region = RHSRegion;
14541 Visit(BO->getRHS());
14542 }
14543
14544 Region = OldRegion;
14545 Tree.merge(LHSRegion);
14546 Tree.merge(RHSRegion);
14547 }
14548
14549 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
14550 // C++11 [expr.cond]p1:
14551 // [...] Every value computation and side effect associated with the first
14552 // expression is sequenced before every value computation and side effect
14553 // associated with the second or third expression.
14554 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
14555
14556 // No sequencing is specified between the true and false expression.
14557 // However, since exactly one of the two is going to be evaluated, we can
14558 // consider them to be sequenced. This is needed to avoid warning on
14559 // something like "x ? y += 1 : y += 2;" in the case where we will visit
14560 // both the true and false expressions because we can't evaluate x.
14561 // This will still allow us to detect an expression like (pre C++17)
14562 // "(x ? y += 1 : y += 2) = y".
14563 //
14564 // We don't wrap the visitation of the true and false expressions with
14565 // SequencedSubexpression because we don't want to downgrade modifications
14566 // as side effects in the true and false expressions after the visitation
14567 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
14568 // not warn between the two "y++", but we should warn between the "y++"
14569 // and the "y".)
14570 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
14571 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
14572 SequenceTree::Seq OldRegion = Region;
14573
14574 EvaluationTracker Eval(*this);
14575 {
14576 SequencedSubexpression Sequenced(*this);
14577 Region = ConditionRegion;
14578 Visit(CO->getCond());
14579 }
14580
14581 // C++11 [expr.cond]p1:
14582 // [...] The first expression is contextually converted to bool (Clause 4).
14583 // It is evaluated and if it is true, the result of the conditional
14584 // expression is the value of the second expression, otherwise that of the
14585 // third expression. Only one of the second and third expressions is
14586 // evaluated. [...]
14587 bool EvalResult = false;
14588 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
14589 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
14590 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
14591 if (ShouldVisitTrueExpr) {
14592 Region = TrueRegion;
14593 Visit(CO->getTrueExpr());
14594 }
14595 if (ShouldVisitFalseExpr) {
14596 Region = FalseRegion;
14597 Visit(CO->getFalseExpr());
14598 }
14599
14600 Region = OldRegion;
14601 Tree.merge(ConditionRegion);
14602 Tree.merge(TrueRegion);
14603 Tree.merge(FalseRegion);
14604 }
14605
14606 void VisitCallExpr(const CallExpr *CE) {
14607 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
14608
14609 if (CE->isUnevaluatedBuiltinCall(Context))
14610 return;
14611
14612 // C++11 [intro.execution]p15:
14613 // When calling a function [...], every value computation and side effect
14614 // associated with any argument expression, or with the postfix expression
14615 // designating the called function, is sequenced before execution of every
14616 // expression or statement in the body of the function [and thus before
14617 // the value computation of its result].
14618 SequencedSubexpression Sequenced(*this); 14619 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 14620 // C++17 [expr.call]p5 14621 // The postfix-expression is sequenced before each expression in the 14622 // expression-list and any default argument. [...] 14623 SequenceTree::Seq CalleeRegion; 14624 SequenceTree::Seq OtherRegion; 14625 if (SemaRef.getLangOpts().CPlusPlus17) { 14626 CalleeRegion = Tree.allocate(Region); 14627 OtherRegion = Tree.allocate(Region); 14628 } else { 14629 CalleeRegion = Region; 14630 OtherRegion = Region; 14631 } 14632 SequenceTree::Seq OldRegion = Region; 14633 14634 // Visit the callee expression first. 14635 Region = CalleeRegion; 14636 if (SemaRef.getLangOpts().CPlusPlus17) { 14637 SequencedSubexpression Sequenced(*this); 14638 Visit(CE->getCallee()); 14639 } else { 14640 Visit(CE->getCallee()); 14641 } 14642 14643 // Then visit the argument expressions. 14644 Region = OtherRegion; 14645 for (const Expr *Argument : CE->arguments()) 14646 Visit(Argument); 14647 14648 Region = OldRegion; 14649 if (SemaRef.getLangOpts().CPlusPlus17) { 14650 Tree.merge(CalleeRegion); 14651 Tree.merge(OtherRegion); 14652 } 14653 }); 14654 } 14655 14656 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 14657 // C++17 [over.match.oper]p2: 14658 // [...] the operator notation is first transformed to the equivalent 14659 // function-call notation as summarized in Table 12 (where @ denotes one 14660 // of the operators covered in the specified subclause). However, the 14661 // operands are sequenced in the order prescribed for the built-in 14662 // operator (Clause 8). 14663 // 14664 // From the above only overloaded binary operators and overloaded call 14665 // operators have sequencing rules in C++17 that we need to handle 14666 // separately. 14667 if (!SemaRef.getLangOpts().CPlusPlus17 || 14668 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 14669 return VisitCallExpr(CXXOCE); 14670 14671 enum { 14672 NoSequencing, 14673 LHSBeforeRHS, 14674 RHSBeforeLHS, 14675 LHSBeforeRest 14676 } SequencingKind; 14677 switch (CXXOCE->getOperator()) { 14678 case OO_Equal: 14679 case OO_PlusEqual: 14680 case OO_MinusEqual: 14681 case OO_StarEqual: 14682 case OO_SlashEqual: 14683 case OO_PercentEqual: 14684 case OO_CaretEqual: 14685 case OO_AmpEqual: 14686 case OO_PipeEqual: 14687 case OO_LessLessEqual: 14688 case OO_GreaterGreaterEqual: 14689 SequencingKind = RHSBeforeLHS; 14690 break; 14691 14692 case OO_LessLess: 14693 case OO_GreaterGreater: 14694 case OO_AmpAmp: 14695 case OO_PipePipe: 14696 case OO_Comma: 14697 case OO_ArrowStar: 14698 case OO_Subscript: 14699 SequencingKind = LHSBeforeRHS; 14700 break; 14701 14702 case OO_Call: 14703 SequencingKind = LHSBeforeRest; 14704 break; 14705 14706 default: 14707 SequencingKind = NoSequencing; 14708 break; 14709 } 14710 14711 if (SequencingKind == NoSequencing) 14712 return VisitCallExpr(CXXOCE); 14713 14714 // This is a call, so all subexpressions are sequenced before the result. 
14715 SequencedSubexpression Sequenced(*this); 14716 14717 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 14718 assert(SemaRef.getLangOpts().CPlusPlus17 && 14719 "Should only get there with C++17 and above!"); 14720 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 14721 "Should only get there with an overloaded binary operator" 14722 " or an overloaded call operator!"); 14723 14724 if (SequencingKind == LHSBeforeRest) { 14725 assert(CXXOCE->getOperator() == OO_Call && 14726 "We should only have an overloaded call operator here!"); 14727 14728 // This is very similar to VisitCallExpr, except that we only have the 14729 // C++17 case. The postfix-expression is the first argument of the 14730 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 14731 // are in the following arguments. 14732 // 14733 // Note that we intentionally do not visit the callee expression since 14734 // it is just a decayed reference to a function. 14735 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 14736 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 14737 SequenceTree::Seq OldRegion = Region; 14738 14739 assert(CXXOCE->getNumArgs() >= 1 && 14740 "An overloaded call operator must have at least one argument" 14741 " for the postfix-expression!"); 14742 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 14743 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 14744 CXXOCE->getNumArgs() - 1); 14745 14746 // Visit the postfix-expression first. 14747 { 14748 Region = PostfixExprRegion; 14749 SequencedSubexpression Sequenced(*this); 14750 Visit(PostfixExpr); 14751 } 14752 14753 // Then visit the argument expressions. 14754 Region = ArgsRegion; 14755 for (const Expr *Arg : Args) 14756 Visit(Arg); 14757 14758 Region = OldRegion; 14759 Tree.merge(PostfixExprRegion); 14760 Tree.merge(ArgsRegion); 14761 } else { 14762 assert(CXXOCE->getNumArgs() == 2 && 14763 "Should only have two arguments here!"); 14764 assert((SequencingKind == LHSBeforeRHS || 14765 SequencingKind == RHSBeforeLHS) && 14766 "Unexpected sequencing kind!"); 14767 14768 // We do not visit the callee expression since it is just a decayed 14769 // reference to a function. 14770 const Expr *E1 = CXXOCE->getArg(0); 14771 const Expr *E2 = CXXOCE->getArg(1); 14772 if (SequencingKind == RHSBeforeLHS) 14773 std::swap(E1, E2); 14774 14775 return VisitSequencedExpressions(E1, E2); 14776 } 14777 }); 14778 } 14779 14780 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14781 // This is a call, so all subexpressions are sequenced before the result. 14782 SequencedSubexpression Sequenced(*this); 14783 14784 if (!CCE->isListInitialization()) 14785 return VisitExpr(CCE); 14786 14787 // In C++11, list initializations are sequenced. 14788 SmallVector<SequenceTree::Seq, 32> Elts; 14789 SequenceTree::Seq Parent = Region; 14790 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14791 E = CCE->arg_end(); 14792 I != E; ++I) { 14793 Region = Tree.allocate(Parent); 14794 Elts.push_back(Region); 14795 Visit(*I); 14796 } 14797 14798 // Forget that the initializers are sequenced. 14799 Region = Parent; 14800 for (unsigned I = 0; I < Elts.size(); ++I) 14801 Tree.merge(Elts[I]); 14802 } 14803 14804 void VisitInitListExpr(const InitListExpr *ILE) { 14805 if (!SemaRef.getLangOpts().CPlusPlus11) 14806 return VisitExpr(ILE); 14807 14808 // In C++11, list initializations are sequenced. 
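// (e.g. 'int a[] = {i++, i++};' is well-defined in C++11 because each element
// initializer is sequenced before the next.)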
14809 SmallVector<SequenceTree::Seq, 32> Elts; 14810 SequenceTree::Seq Parent = Region; 14811 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14812 const Expr *E = ILE->getInit(I); 14813 if (!E) 14814 continue; 14815 Region = Tree.allocate(Parent); 14816 Elts.push_back(Region); 14817 Visit(E); 14818 } 14819 14820 // Forget that the initializers are sequenced. 14821 Region = Parent; 14822 for (unsigned I = 0; I < Elts.size(); ++I) 14823 Tree.merge(Elts[I]); 14824 } 14825 }; 14826 14827 } // namespace 14828 14829 void Sema::CheckUnsequencedOperations(const Expr *E) { 14830 SmallVector<const Expr *, 8> WorkList; 14831 WorkList.push_back(E); 14832 while (!WorkList.empty()) { 14833 const Expr *Item = WorkList.pop_back_val(); 14834 SequenceChecker(*this, Item, WorkList); 14835 } 14836 } 14837 14838 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14839 bool IsConstexpr) { 14840 llvm::SaveAndRestore<bool> ConstantContext( 14841 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14842 CheckImplicitConversions(E, CheckLoc); 14843 if (!E->isInstantiationDependent()) 14844 CheckUnsequencedOperations(E); 14845 if (!IsConstexpr && !E->isValueDependent()) 14846 CheckForIntOverflow(E); 14847 DiagnoseMisalignedMembers(); 14848 } 14849 14850 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14851 FieldDecl *BitField, 14852 Expr *Init) { 14853 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14854 } 14855 14856 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14857 SourceLocation Loc) { 14858 if (!PType->isVariablyModifiedType()) 14859 return; 14860 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14861 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14862 return; 14863 } 14864 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14865 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14866 return; 14867 } 14868 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14869 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14870 return; 14871 } 14872 14873 const ArrayType *AT = S.Context.getAsArrayType(PType); 14874 if (!AT) 14875 return; 14876 14877 if (AT->getSizeModifier() != ArrayType::Star) { 14878 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 14879 return; 14880 } 14881 14882 S.Diag(Loc, diag::err_array_star_in_function_definition); 14883 } 14884 14885 /// CheckParmsForFunctionDef - Check that the parameters of the given 14886 /// function are appropriate for the definition of a function. This 14887 /// takes care of any checks that cannot be performed on the 14888 /// declaration itself, e.g., that the types of each of the function 14889 /// parameters are complete. 14890 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14891 bool CheckParameterNames) { 14892 bool HasInvalidParm = false; 14893 for (ParmVarDecl *Param : Parameters) { 14894 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14895 // function declarator that is part of a function definition of 14896 // that function shall not have incomplete type. 14897 // 14898 // This is also C++ [dcl.fct]p6. 
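// (e.g. 'struct S; void f(struct S s) {}' must be diagnosed here.)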
14899 if (!Param->isInvalidDecl() && 14900 RequireCompleteType(Param->getLocation(), Param->getType(), 14901 diag::err_typecheck_decl_incomplete_type)) { 14902 Param->setInvalidDecl(); 14903 HasInvalidParm = true; 14904 } 14905 14906 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14907 // declaration of each parameter shall include an identifier. 14908 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14909 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14910 // Diagnose this as an extension in C17 and earlier. 14911 if (!getLangOpts().C2x) 14912 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14913 } 14914 14915 // C99 6.7.5.3p12: 14916 // If the function declarator is not part of a definition of that 14917 // function, parameters may have incomplete type and may use the [*] 14918 // notation in their sequences of declarator specifiers to specify 14919 // variable length array types. 14920 QualType PType = Param->getOriginalType(); 14921 // FIXME: This diagnostic should point the '[*]' if source-location 14922 // information is added for it. 14923 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14924 14925 // If the parameter is a c++ class type and it has to be destructed in the 14926 // callee function, declare the destructor so that it can be called by the 14927 // callee function. Do not perform any direct access check on the dtor here. 14928 if (!Param->isInvalidDecl()) { 14929 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14930 if (!ClassDecl->isInvalidDecl() && 14931 !ClassDecl->hasIrrelevantDestructor() && 14932 !ClassDecl->isDependentContext() && 14933 ClassDecl->isParamDestroyedInCallee()) { 14934 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14935 MarkFunctionReferenced(Param->getLocation(), Destructor); 14936 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14937 } 14938 } 14939 } 14940 14941 // Parameters with the pass_object_size attribute only need to be marked 14942 // constant at function definitions. Because we lack information about 14943 // whether we're on a declaration or definition when we're instantiating the 14944 // attribute, we need to check for constness here. 14945 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14946 if (!Param->getType().isConstQualified()) 14947 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14948 << Attr->getSpelling() << 1; 14949 14950 // Check for parameter names shadowing fields from the class. 14951 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14952 // The owning context for the parameter should be the function, but we 14953 // want to see if this function's declaration context is a record. 14954 DeclContext *DC = Param->getDeclContext(); 14955 if (DC && DC->isFunctionOrMethod()) { 14956 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14957 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14958 RD, /*DeclIsField*/ false); 14959 } 14960 } 14961 } 14962 14963 return HasInvalidParm; 14964 } 14965 14966 Optional<std::pair<CharUnits, CharUnits>> 14967 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14968 14969 /// Compute the alignment and offset of the base class object given the 14970 /// derived-to-base cast expression and the alignment and offset of the derived 14971 /// class object. 
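/// (For a virtual base the result is conservatively capped at the base's
/// non-virtual alignment, since the complete object may be less aligned than
/// the derived-class pointer suggests.)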
14972 static std::pair<CharUnits, CharUnits> 14973 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14974 CharUnits BaseAlignment, CharUnits Offset, 14975 ASTContext &Ctx) { 14976 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14977 ++PathI) { 14978 const CXXBaseSpecifier *Base = *PathI; 14979 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14980 if (Base->isVirtual()) { 14981 // The complete object may have a lower alignment than the non-virtual 14982 // alignment of the base, in which case the base may be misaligned. Choose 14983 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14984 // conservative lower bound of the complete object alignment. 14985 CharUnits NonVirtualAlignment = 14986 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14987 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14988 Offset = CharUnits::Zero(); 14989 } else { 14990 const ASTRecordLayout &RL = 14991 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14992 Offset += RL.getBaseClassOffset(BaseDecl); 14993 } 14994 DerivedType = Base->getType(); 14995 } 14996 14997 return std::make_pair(BaseAlignment, Offset); 14998 } 14999 15000 /// Compute the alignment and offset of a binary additive operator. 15001 static Optional<std::pair<CharUnits, CharUnits>> 15002 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15003 bool IsSub, ASTContext &Ctx) { 15004 QualType PointeeType = PtrE->getType()->getPointeeType(); 15005 15006 if (!PointeeType->isConstantSizeType()) 15007 return llvm::None; 15008 15009 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15010 15011 if (!P) 15012 return llvm::None; 15013 15014 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15015 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15016 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15017 if (IsSub) 15018 Offset = -Offset; 15019 return std::make_pair(P->first, P->second + Offset); 15020 } 15021 15022 // If the integer expression isn't a constant expression, compute the lower 15023 // bound of the alignment using the alignment and offset of the pointer 15024 // expression and the element size. 15025 return std::make_pair( 15026 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15027 CharUnits::Zero()); 15028 } 15029 15030 /// This helper function takes an lvalue expression and returns the alignment of 15031 /// a VarDecl and a constant offset from the VarDecl. 
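/// (e.g. for 's.arr[2]' it returns the alignment of 's' together with the
/// constant byte offset of 'arr[2]' within it.)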
15032 Optional<std::pair<CharUnits, CharUnits>> 15033 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15034 E = E->IgnoreParens(); 15035 switch (E->getStmtClass()) { 15036 default: 15037 break; 15038 case Stmt::CStyleCastExprClass: 15039 case Stmt::CXXStaticCastExprClass: 15040 case Stmt::ImplicitCastExprClass: { 15041 auto *CE = cast<CastExpr>(E); 15042 const Expr *From = CE->getSubExpr(); 15043 switch (CE->getCastKind()) { 15044 default: 15045 break; 15046 case CK_NoOp: 15047 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15048 case CK_UncheckedDerivedToBase: 15049 case CK_DerivedToBase: { 15050 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15051 if (!P) 15052 break; 15053 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15054 P->second, Ctx); 15055 } 15056 } 15057 break; 15058 } 15059 case Stmt::ArraySubscriptExprClass: { 15060 auto *ASE = cast<ArraySubscriptExpr>(E); 15061 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15062 false, Ctx); 15063 } 15064 case Stmt::DeclRefExprClass: { 15065 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15066 // FIXME: If VD is captured by copy or is an escaping __block variable, 15067 // use the alignment of VD's type. 15068 if (!VD->getType()->isReferenceType()) 15069 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15070 if (VD->hasInit()) 15071 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15072 } 15073 break; 15074 } 15075 case Stmt::MemberExprClass: { 15076 auto *ME = cast<MemberExpr>(E); 15077 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15078 if (!FD || FD->getType()->isReferenceType() || 15079 FD->getParent()->isInvalidDecl()) 15080 break; 15081 Optional<std::pair<CharUnits, CharUnits>> P; 15082 if (ME->isArrow()) 15083 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15084 else 15085 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15086 if (!P) 15087 break; 15088 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15089 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15090 return std::make_pair(P->first, 15091 P->second + CharUnits::fromQuantity(Offset)); 15092 } 15093 case Stmt::UnaryOperatorClass: { 15094 auto *UO = cast<UnaryOperator>(E); 15095 switch (UO->getOpcode()) { 15096 default: 15097 break; 15098 case UO_Deref: 15099 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15100 } 15101 break; 15102 } 15103 case Stmt::BinaryOperatorClass: { 15104 auto *BO = cast<BinaryOperator>(E); 15105 auto Opcode = BO->getOpcode(); 15106 switch (Opcode) { 15107 default: 15108 break; 15109 case BO_Comma: 15110 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15111 } 15112 break; 15113 } 15114 } 15115 return llvm::None; 15116 } 15117 15118 /// This helper function takes a pointer expression and returns the alignment of 15119 /// a VarDecl and a constant offset from the VarDecl. 
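/// For example (illustrative), `&s.field` resolves to the declared alignment
/// of `s` plus the field's offset, `this` resolves to the class's non-virtual
/// alignment at offset zero, and pointer arithmetic with a constant index
/// adjusts the offset by index * element size; unsupported forms yield None.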
15120 Optional<std::pair<CharUnits, CharUnits>> 15121 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 15122 E = E->IgnoreParens(); 15123 switch (E->getStmtClass()) { 15124 default: 15125 break; 15126 case Stmt::CStyleCastExprClass: 15127 case Stmt::CXXStaticCastExprClass: 15128 case Stmt::ImplicitCastExprClass: { 15129 auto *CE = cast<CastExpr>(E); 15130 const Expr *From = CE->getSubExpr(); 15131 switch (CE->getCastKind()) { 15132 default: 15133 break; 15134 case CK_NoOp: 15135 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15136 case CK_ArrayToPointerDecay: 15137 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15138 case CK_UncheckedDerivedToBase: 15139 case CK_DerivedToBase: { 15140 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 15141 if (!P) 15142 break; 15143 return getDerivedToBaseAlignmentAndOffset( 15144 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 15145 } 15146 } 15147 break; 15148 } 15149 case Stmt::CXXThisExprClass: { 15150 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 15151 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 15152 return std::make_pair(Alignment, CharUnits::Zero()); 15153 } 15154 case Stmt::UnaryOperatorClass: { 15155 auto *UO = cast<UnaryOperator>(E); 15156 if (UO->getOpcode() == UO_AddrOf) 15157 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 15158 break; 15159 } 15160 case Stmt::BinaryOperatorClass: { 15161 auto *BO = cast<BinaryOperator>(E); 15162 auto Opcode = BO->getOpcode(); 15163 switch (Opcode) { 15164 default: 15165 break; 15166 case BO_Add: 15167 case BO_Sub: { 15168 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 15169 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 15170 std::swap(LHS, RHS); 15171 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 15172 Ctx); 15173 } 15174 case BO_Comma: 15175 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 15176 } 15177 break; 15178 } 15179 } 15180 return llvm::None; 15181 } 15182 15183 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 15184 // See if we can compute the alignment of a VarDecl and an offset from it. 15185 Optional<std::pair<CharUnits, CharUnits>> P = 15186 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 15187 15188 if (P) 15189 return P->first.alignmentAtOffset(P->second); 15190 15191 // If that failed, return the type's alignment. 15192 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 15193 } 15194 15195 /// CheckCastAlign - Implements -Wcast-align, which warns when a 15196 /// pointer cast increases the alignment requirements. 15197 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 15198 // This is actually a lot of work to potentially be doing on every 15199 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 15200 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 15201 return; 15202 15203 // Ignore dependent types. 15204 if (T->isDependentType() || Op->getType()->isDependentType()) 15205 return; 15206 15207 // Require that the destination be a pointer type. 15208 const PointerType *DestPtr = T->getAs<PointerType>(); 15209 if (!DestPtr) return; 15210 15211 // If the destination has alignment 1, we're done. 
15212 QualType DestPointee = DestPtr->getPointeeType(); 15213 if (DestPointee->isIncompleteType()) return; 15214 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 15215 if (DestAlign.isOne()) return; 15216 15217 // Require that the source be a pointer type. 15218 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 15219 if (!SrcPtr) return; 15220 QualType SrcPointee = SrcPtr->getPointeeType(); 15221 15222 // Explicitly allow casts from cv void*. We already implicitly 15223 // allowed casts to cv void*, since they have alignment 1. 15224 // Also allow casts involving incomplete types, which implicitly 15225 // includes 'void'. 15226 if (SrcPointee->isIncompleteType()) return; 15227 15228 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 15229 15230 if (SrcAlign >= DestAlign) return; 15231 15232 Diag(TRange.getBegin(), diag::warn_cast_align) 15233 << Op->getType() << T 15234 << static_cast<unsigned>(SrcAlign.getQuantity()) 15235 << static_cast<unsigned>(DestAlign.getQuantity()) 15236 << TRange << Op->getSourceRange(); 15237 } 15238 15239 /// Check whether this array fits the idiom of a size-one tail padded 15240 /// array member of a struct. 15241 /// 15242 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 15243 /// commonly used to emulate flexible arrays in C89 code. 15244 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 15245 const NamedDecl *ND) { 15246 if (Size != 1 || !ND) return false; 15247 15248 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 15249 if (!FD) return false; 15250 15251 // Don't consider sizes resulting from macro expansions or template argument 15252 // substitution to form C89 tail-padded arrays. 15253 15254 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 15255 while (TInfo) { 15256 TypeLoc TL = TInfo->getTypeLoc(); 15257 // Look through typedefs. 15258 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 15259 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 15260 TInfo = TDL->getTypeSourceInfo(); 15261 continue; 15262 } 15263 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 15264 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 15265 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 15266 return false; 15267 } 15268 break; 15269 } 15270 15271 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 15272 if (!RD) return false; 15273 if (RD->isUnion()) return false; 15274 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 15275 if (!CRD->isStandardLayout()) return false; 15276 } 15277 15278 // See if this is the last field decl in the record. 15279 const Decl *D = FD; 15280 while ((D = D->getNextDeclInContext())) 15281 if (isa<FieldDecl>(D)) 15282 return false; 15283 return true; 15284 } 15285 15286 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 15287 const ArraySubscriptExpr *ASE, 15288 bool AllowOnePastEnd, bool IndexNegated) { 15289 // Already diagnosed by the constant evaluator. 15290 if (isConstantEvaluated()) 15291 return; 15292 15293 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 15294 if (IndexExpr->isValueDependent()) 15295 return; 15296 15297 const Type *EffectiveType = 15298 BaseExpr->getType()->getPointeeOrArrayElementType(); 15299 BaseExpr = BaseExpr->IgnoreParenCasts(); 15300 const ConstantArrayType *ArrayTy = 15301 Context.getAsConstantArrayType(BaseExpr->getType()); 15302 15303 const Type *BaseType = 15304 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 15305 bool IsUnboundedArray = (BaseType == nullptr); 15306 if (EffectiveType->isDependentType() || 15307 (!IsUnboundedArray && BaseType->isDependentType())) 15308 return; 15309 15310 Expr::EvalResult Result; 15311 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15312 return; 15313 15314 llvm::APSInt index = Result.Val.getInt(); 15315 if (IndexNegated) { 15316 index.setIsUnsigned(false); 15317 index = -index; 15318 } 15319 15320 const NamedDecl *ND = nullptr; 15321 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15322 ND = DRE->getDecl(); 15323 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15324 ND = ME->getMemberDecl(); 15325 15326 if (IsUnboundedArray) { 15327 if (index.isUnsigned() || !index.isNegative()) { 15328 const auto &ASTC = getASTContext(); 15329 unsigned AddrBits = 15330 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15331 EffectiveType->getCanonicalTypeInternal())); 15332 if (index.getBitWidth() < AddrBits) 15333 index = index.zext(AddrBits); 15334 Optional<CharUnits> ElemCharUnits = 15335 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15336 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15337 // pointer) bounds-checking isn't meaningful. 15338 if (!ElemCharUnits) 15339 return; 15340 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15341 // If index has more active bits than address space, we already know 15342 // we have a bounds violation to warn about. Otherwise, compute 15343 // address of (index + 1)th element, and warn about bounds violation 15344 // only if that address exceeds address space. 15345 if (index.getActiveBits() <= AddrBits) { 15346 bool Overflow; 15347 llvm::APInt Product(index); 15348 Product += 1; 15349 Product = Product.umul_ov(ElemBytes, Overflow); 15350 if (!Overflow && Product.getActiveBits() <= AddrBits) 15351 return; 15352 } 15353 15354 // Need to compute max possible elements in address space, since that 15355 // is included in diag message. 15356 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15357 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15358 MaxElems += 1; 15359 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15360 MaxElems = MaxElems.udiv(ElemBytes); 15361 15362 unsigned DiagID = 15363 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15364 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15365 15366 // Diag message shows element size in bits and in "bytes" (platform- 15367 // dependent CharUnits) 15368 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15369 PDiag(DiagID) 15370 << toString(index, 10, true) << AddrBits 15371 << (unsigned)ASTC.toBits(*ElemCharUnits) 15372 << toString(ElemBytes, 10, false) 15373 << toString(MaxElems, 10, false) 15374 << (unsigned)MaxElems.getLimitedValue(~0U) 15375 << IndexExpr->getSourceRange()); 15376 15377 if (!ND) { 15378 // Try harder to find a NamedDecl to point at in the note. 
15379 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15380 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15381 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15382 ND = DRE->getDecl(); 15383 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15384 ND = ME->getMemberDecl(); 15385 } 15386 15387 if (ND) 15388 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15389 PDiag(diag::note_array_declared_here) << ND); 15390 } 15391 return; 15392 } 15393 15394 if (index.isUnsigned() || !index.isNegative()) { 15395 // It is possible that the type of the base expression after 15396 // IgnoreParenCasts is incomplete, even though the type of the base 15397 // expression before IgnoreParenCasts is complete (see PR39746 for an 15398 // example). In this case we have no information about whether the array 15399 // access exceeds the array bounds. However we can still diagnose an array 15400 // access which precedes the array bounds. 15401 if (BaseType->isIncompleteType()) 15402 return; 15403 15404 llvm::APInt size = ArrayTy->getSize(); 15405 if (!size.isStrictlyPositive()) 15406 return; 15407 15408 if (BaseType != EffectiveType) { 15409 // Make sure we're comparing apples to apples when comparing index to size 15410 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15411 uint64_t array_typesize = Context.getTypeSize(BaseType); 15412 // Handle ptrarith_typesize being zero, such as when casting to void* 15413 if (!ptrarith_typesize) ptrarith_typesize = 1; 15414 if (ptrarith_typesize != array_typesize) { 15415 // There's a cast to a different size type involved 15416 uint64_t ratio = array_typesize / ptrarith_typesize; 15417 // TODO: Be smarter about handling cases where array_typesize is not a 15418 // multiple of ptrarith_typesize 15419 if (ptrarith_typesize * ratio == array_typesize) 15420 size *= llvm::APInt(size.getBitWidth(), ratio); 15421 } 15422 } 15423 15424 if (size.getBitWidth() > index.getBitWidth()) 15425 index = index.zext(size.getBitWidth()); 15426 else if (size.getBitWidth() < index.getBitWidth()) 15427 size = size.zext(index.getBitWidth()); 15428 15429 // For array subscripting the index must be less than size, but for pointer 15430 // arithmetic also allow the index (offset) to be equal to size since 15431 // computing the next address after the end of the array is legal and 15432 // commonly done e.g. in C++ iterators and range-based for loops. 15433 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15434 return; 15435 15436 // Also don't warn for arrays of size 1 which are members of some 15437 // structure. These are often used to approximate flexible arrays in C89 15438 // code. 15439 if (IsTailPaddedMemberArray(*this, size, ND)) 15440 return; 15441 15442 // Suppress the warning if the subscript expression (as identified by the 15443 // ']' location) and the index expression are both from macro expansions 15444 // within a system header. 15445 if (ASE) { 15446 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15447 ASE->getRBracketLoc()); 15448 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15449 SourceLocation IndexLoc = 15450 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15451 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15452 return; 15453 } 15454 } 15455 15456 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15457 : diag::warn_ptr_arith_exceeds_bounds; 15458 15459 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15460 PDiag(DiagID) << toString(index, 10, true) 15461 << toString(size, 10, true) 15462 << (unsigned)size.getLimitedValue(~0U) 15463 << IndexExpr->getSourceRange()); 15464 } else { 15465 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15466 if (!ASE) { 15467 DiagID = diag::warn_ptr_arith_precedes_bounds; 15468 if (index.isNegative()) index = -index; 15469 } 15470 15471 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15472 PDiag(DiagID) << toString(index, 10, true) 15473 << IndexExpr->getSourceRange()); 15474 } 15475 15476 if (!ND) { 15477 // Try harder to find a NamedDecl to point at in the note. 15478 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15479 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15480 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15481 ND = DRE->getDecl(); 15482 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15483 ND = ME->getMemberDecl(); 15484 } 15485 15486 if (ND) 15487 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15488 PDiag(diag::note_array_declared_here) << ND); 15489 } 15490 15491 void Sema::CheckArrayAccess(const Expr *expr) { 15492 int AllowOnePastEnd = 0; 15493 while (expr) { 15494 expr = expr->IgnoreParenImpCasts(); 15495 switch (expr->getStmtClass()) { 15496 case Stmt::ArraySubscriptExprClass: { 15497 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15498 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15499 AllowOnePastEnd > 0); 15500 expr = ASE->getBase(); 15501 break; 15502 } 15503 case Stmt::MemberExprClass: { 15504 expr = cast<MemberExpr>(expr)->getBase(); 15505 break; 15506 } 15507 case Stmt::OMPArraySectionExprClass: { 15508 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15509 if (ASE->getLowerBound()) 15510 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15511 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15512 return; 15513 } 15514 case Stmt::UnaryOperatorClass: { 15515 // Only unwrap the * and & unary operators 15516 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15517 expr = UO->getSubExpr(); 15518 switch (UO->getOpcode()) { 15519 case UO_AddrOf: 15520 AllowOnePastEnd++; 15521 break; 15522 case UO_Deref: 15523 AllowOnePastEnd--; 15524 break; 15525 default: 15526 return; 15527 } 15528 break; 15529 } 15530 case Stmt::ConditionalOperatorClass: { 15531 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15532 if (const Expr *lhs = cond->getLHS()) 15533 CheckArrayAccess(lhs); 15534 if (const Expr *rhs = cond->getRHS()) 15535 CheckArrayAccess(rhs); 15536 return; 15537 } 15538 case Stmt::CXXOperatorCallExprClass: { 15539 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15540 for (const auto *Arg : OCE->arguments()) 15541 CheckArrayAccess(Arg); 15542 return; 15543 } 15544 default: 15545 return; 15546 } 15547 } 15548 } 15549 15550 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15551 15552 namespace { 15553 15554 struct RetainCycleOwner { 15555 VarDecl *Variable = nullptr; 15556 SourceRange Range; 15557 SourceLocation Loc; 15558 bool Indirect = false; 15559 15560 RetainCycleOwner() = default; 15561 15562 void setLocsFrom(Expr *e) { 15563 Loc = e->getExprLoc(); 15564 Range = e->getSourceRange(); 15565 } 15566 }; 15567 15568 } // namespace 15569 15570 /// Consider whether capturing the given variable can possibly lead to 15571 /// a retain cycle. 
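/// For example (illustrative), under ARC:
///   self.onCompletion = ^{ [self dismiss]; };
/// strongly captures `self` in a block that `self` itself retains, which is
/// the kind of cycle this analysis tries to flag (the property and method
/// names here are hypothetical).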
15572 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 15573 // In ARC, it's captured strongly iff the variable has __strong 15574 // lifetime. In MRR, it's captured strongly if the variable is 15575 // __block and has an appropriate type. 15576 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15577 return false; 15578 15579 owner.Variable = var; 15580 if (ref) 15581 owner.setLocsFrom(ref); 15582 return true; 15583 } 15584 15585 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 15586 while (true) { 15587 e = e->IgnoreParens(); 15588 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 15589 switch (cast->getCastKind()) { 15590 case CK_BitCast: 15591 case CK_LValueBitCast: 15592 case CK_LValueToRValue: 15593 case CK_ARCReclaimReturnedObject: 15594 e = cast->getSubExpr(); 15595 continue; 15596 15597 default: 15598 return false; 15599 } 15600 } 15601 15602 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 15603 ObjCIvarDecl *ivar = ref->getDecl(); 15604 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15605 return false; 15606 15607 // Try to find a retain cycle in the base. 15608 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 15609 return false; 15610 15611 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 15612 owner.Indirect = true; 15613 return true; 15614 } 15615 15616 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 15617 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 15618 if (!var) return false; 15619 return considerVariable(var, ref, owner); 15620 } 15621 15622 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 15623 if (member->isArrow()) return false; 15624 15625 // Don't count this as an indirect ownership. 15626 e = member->getBase(); 15627 continue; 15628 } 15629 15630 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 15631 // Only pay attention to pseudo-objects on property references. 15632 ObjCPropertyRefExpr *pre 15633 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 15634 ->IgnoreParens()); 15635 if (!pre) return false; 15636 if (pre->isImplicitProperty()) return false; 15637 ObjCPropertyDecl *property = pre->getExplicitProperty(); 15638 if (!property->isRetaining() && 15639 !(property->getPropertyIvarDecl() && 15640 property->getPropertyIvarDecl()->getType() 15641 .getObjCLifetime() == Qualifiers::OCL_Strong)) 15642 return false; 15643 15644 owner.Indirect = true; 15645 if (pre->isSuperReceiver()) { 15646 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 15647 if (!owner.Variable) 15648 return false; 15649 owner.Loc = pre->getLocation(); 15650 owner.Range = pre->getSourceRange(); 15651 return true; 15652 } 15653 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 15654 ->getSourceExpr()); 15655 continue; 15656 } 15657 15658 // Array ivars? 
15659 15660 return false; 15661 } 15662 } 15663 15664 namespace { 15665 15666 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 15667 ASTContext &Context; 15668 VarDecl *Variable; 15669 Expr *Capturer = nullptr; 15670 bool VarWillBeReased = false; 15671 15672 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 15673 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 15674 Context(Context), Variable(variable) {} 15675 15676 void VisitDeclRefExpr(DeclRefExpr *ref) { 15677 if (ref->getDecl() == Variable && !Capturer) 15678 Capturer = ref; 15679 } 15680 15681 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 15682 if (Capturer) return; 15683 Visit(ref->getBase()); 15684 if (Capturer && ref->isFreeIvar()) 15685 Capturer = ref; 15686 } 15687 15688 void VisitBlockExpr(BlockExpr *block) { 15689 // Look inside nested blocks 15690 if (block->getBlockDecl()->capturesVariable(Variable)) 15691 Visit(block->getBlockDecl()->getBody()); 15692 } 15693 15694 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 15695 if (Capturer) return; 15696 if (OVE->getSourceExpr()) 15697 Visit(OVE->getSourceExpr()); 15698 } 15699 15700 void VisitBinaryOperator(BinaryOperator *BinOp) { 15701 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 15702 return; 15703 Expr *LHS = BinOp->getLHS(); 15704 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 15705 if (DRE->getDecl() != Variable) 15706 return; 15707 if (Expr *RHS = BinOp->getRHS()) { 15708 RHS = RHS->IgnoreParenCasts(); 15709 Optional<llvm::APSInt> Value; 15710 VarWillBeReased = 15711 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 15712 *Value == 0); 15713 } 15714 } 15715 } 15716 }; 15717 15718 } // namespace 15719 15720 /// Check whether the given argument is a block which captures a 15721 /// variable. 15722 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 15723 assert(owner.Variable && owner.Loc.isValid()); 15724 15725 e = e->IgnoreParenCasts(); 15726 15727 // Look through [^{...} copy] and Block_copy(^{...}). 15728 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 15729 Selector Cmd = ME->getSelector(); 15730 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 15731 e = ME->getInstanceReceiver(); 15732 if (!e) 15733 return nullptr; 15734 e = e->IgnoreParenCasts(); 15735 } 15736 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 15737 if (CE->getNumArgs() == 1) { 15738 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 15739 if (Fn) { 15740 const IdentifierInfo *FnI = Fn->getIdentifier(); 15741 if (FnI && FnI->isStr("_Block_copy")) { 15742 e = CE->getArg(0)->IgnoreParenCasts(); 15743 } 15744 } 15745 } 15746 } 15747 15748 BlockExpr *block = dyn_cast<BlockExpr>(e); 15749 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 15750 return nullptr; 15751 15752 FindCaptureVisitor visitor(S.Context, owner.Variable); 15753 visitor.Visit(block->getBlockDecl()->getBody()); 15754 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 15755 } 15756 15757 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 15758 RetainCycleOwner &owner) { 15759 assert(capturer); 15760 assert(owner.Variable && owner.Loc.isValid()); 15761 15762 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 15763 << owner.Variable << capturer->getSourceRange(); 15764 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 15765 << owner.Indirect << owner.Range; 15766 } 15767 15768 /// Check for a keyword selector that starts with the word 'add' or 15769 /// 'set'. 15770 static bool isSetterLikeSelector(Selector sel) { 15771 if (sel.isUnarySelector()) return false; 15772 15773 StringRef str = sel.getNameForSlot(0); 15774 while (!str.empty() && str.front() == '_') str = str.substr(1); 15775 if (str.startswith("set")) 15776 str = str.substr(3); 15777 else if (str.startswith("add")) { 15778 // Specially allow 'addOperationWithBlock:'. 15779 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 15780 return false; 15781 str = str.substr(3); 15782 } 15783 else 15784 return false; 15785 15786 if (str.empty()) return true; 15787 return !isLowercase(str.front()); 15788 } 15789 15790 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 15791 ObjCMessageExpr *Message) { 15792 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 15793 Message->getReceiverInterface(), 15794 NSAPI::ClassId_NSMutableArray); 15795 if (!IsMutableArray) { 15796 return None; 15797 } 15798 15799 Selector Sel = Message->getSelector(); 15800 15801 Optional<NSAPI::NSArrayMethodKind> MKOpt = 15802 S.NSAPIObj->getNSArrayMethodKind(Sel); 15803 if (!MKOpt) { 15804 return None; 15805 } 15806 15807 NSAPI::NSArrayMethodKind MK = *MKOpt; 15808 15809 switch (MK) { 15810 case NSAPI::NSMutableArr_addObject: 15811 case NSAPI::NSMutableArr_insertObjectAtIndex: 15812 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 15813 return 0; 15814 case NSAPI::NSMutableArr_replaceObjectAtIndex: 15815 return 1; 15816 15817 default: 15818 return None; 15819 } 15820 15821 return None; 15822 } 15823 15824 static 15825 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 15826 ObjCMessageExpr *Message) { 15827 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 15828 Message->getReceiverInterface(), 15829 NSAPI::ClassId_NSMutableDictionary); 15830 if (!IsMutableDictionary) { 15831 return None; 15832 } 15833 15834 Selector Sel = Message->getSelector(); 15835 15836 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15837 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15838 if (!MKOpt) { 15839 return None; 15840 } 15841 15842 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15843 15844 switch (MK) { 15845 case NSAPI::NSMutableDict_setObjectForKey: 15846 case NSAPI::NSMutableDict_setValueForKey: 15847 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15848 return 0; 15849 15850 default: 15851 return None; 15852 } 15853 15854 return None; 15855 } 15856 15857 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15858 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15859 Message->getReceiverInterface(), 15860 NSAPI::ClassId_NSMutableSet); 15861 15862 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15863 Message->getReceiverInterface(), 15864 NSAPI::ClassId_NSMutableOrderedSet); 15865 if (!IsMutableSet && !IsMutableOrderedSet) { 15866 return None; 15867 } 15868 15869 Selector Sel = Message->getSelector(); 15870 15871 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15872 if (!MKOpt) 
{ 15873 return None; 15874 } 15875 15876 NSAPI::NSSetMethodKind MK = *MKOpt; 15877 15878 switch (MK) { 15879 case NSAPI::NSMutableSet_addObject: 15880 case NSAPI::NSOrderedSet_setObjectAtIndex: 15881 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15882 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15883 return 0; 15884 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15885 return 1; 15886 } 15887 15888 return None; 15889 } 15890 15891 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15892 if (!Message->isInstanceMessage()) { 15893 return; 15894 } 15895 15896 Optional<int> ArgOpt; 15897 15898 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15899 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15900 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15901 return; 15902 } 15903 15904 int ArgIndex = *ArgOpt; 15905 15906 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15907 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15908 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15909 } 15910 15911 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15912 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15913 if (ArgRE->isObjCSelfExpr()) { 15914 Diag(Message->getSourceRange().getBegin(), 15915 diag::warn_objc_circular_container) 15916 << ArgRE->getDecl() << StringRef("'super'"); 15917 } 15918 } 15919 } else { 15920 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15921 15922 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15923 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15924 } 15925 15926 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15927 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15928 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15929 ValueDecl *Decl = ReceiverRE->getDecl(); 15930 Diag(Message->getSourceRange().getBegin(), 15931 diag::warn_objc_circular_container) 15932 << Decl << Decl; 15933 if (!ArgRE->isObjCSelfExpr()) { 15934 Diag(Decl->getLocation(), 15935 diag::note_objc_circular_container_declared_here) 15936 << Decl; 15937 } 15938 } 15939 } 15940 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15941 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15942 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15943 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15944 Diag(Message->getSourceRange().getBegin(), 15945 diag::warn_objc_circular_container) 15946 << Decl << Decl; 15947 Diag(Decl->getLocation(), 15948 diag::note_objc_circular_container_declared_here) 15949 << Decl; 15950 } 15951 } 15952 } 15953 } 15954 } 15955 15956 /// Check a message send to see if it's likely to cause a retain cycle. 15957 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15958 // Only check instance methods whose selector looks like a setter. 15959 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15960 return; 15961 15962 // Try to find a variable that the receiver is strongly owned by. 
15963 RetainCycleOwner owner; 15964 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15965 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15966 return; 15967 } else { 15968 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15969 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15970 owner.Loc = msg->getSuperLoc(); 15971 owner.Range = msg->getSuperLoc(); 15972 } 15973 15974 // Check whether the receiver is captured by any of the arguments. 15975 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15976 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15977 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15978 // noescape blocks should not be retained by the method. 15979 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15980 continue; 15981 return diagnoseRetainCycle(*this, capturer, owner); 15982 } 15983 } 15984 } 15985 15986 /// Check a property assign to see if it's likely to cause a retain cycle. 15987 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15988 RetainCycleOwner owner; 15989 if (!findRetainCycleOwner(*this, receiver, owner)) 15990 return; 15991 15992 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15993 diagnoseRetainCycle(*this, capturer, owner); 15994 } 15995 15996 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 15997 RetainCycleOwner Owner; 15998 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 15999 return; 16000 16001 // Because we don't have an expression for the variable, we have to set the 16002 // location explicitly here. 16003 Owner.Loc = Var->getLocation(); 16004 Owner.Range = Var->getSourceRange(); 16005 16006 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16007 diagnoseRetainCycle(*this, Capturer, Owner); 16008 } 16009 16010 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16011 Expr *RHS, bool isProperty) { 16012 // Check if RHS is an Objective-C object literal, which also can get 16013 // immediately zapped in a weak reference. Note that we explicitly 16014 // allow ObjCStringLiterals, since those are designed to never really die. 16015 RHS = RHS->IgnoreParenImpCasts(); 16016 16017 // This enum needs to match with the 'select' in 16018 // warn_objc_arc_literal_assign (off-by-1). 16019 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16020 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16021 return false; 16022 16023 S.Diag(Loc, diag::warn_arc_literal_assign) 16024 << (unsigned) Kind 16025 << (isProperty ? 0 : 1) 16026 << RHS->getSourceRange(); 16027 16028 return true; 16029 } 16030 16031 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16032 Qualifiers::ObjCLifetime LT, 16033 Expr *RHS, bool isProperty) { 16034 // Strip off any implicit cast added to get to the one ARC-specific. 16035 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16036 if (cast->getCastKind() == CK_ARCConsumeObject) { 16037 S.Diag(Loc, diag::warn_arc_retained_assign) 16038 << (LT == Qualifiers::OCL_ExplicitNone) 16039 << (isProperty ? 
0 : 1) 16040 << RHS->getSourceRange(); 16041 return true; 16042 } 16043 RHS = cast->getSubExpr(); 16044 } 16045 16046 if (LT == Qualifiers::OCL_Weak && 16047 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16048 return true; 16049 16050 return false; 16051 } 16052 16053 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16054 QualType LHS, Expr *RHS) { 16055 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16056 16057 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16058 return false; 16059 16060 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16061 return true; 16062 16063 return false; 16064 } 16065 16066 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16067 Expr *LHS, Expr *RHS) { 16068 QualType LHSType; 16069 // PropertyRef on LHS type need be directly obtained from 16070 // its declaration as it has a PseudoType. 16071 ObjCPropertyRefExpr *PRE 16072 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16073 if (PRE && !PRE->isImplicitProperty()) { 16074 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16075 if (PD) 16076 LHSType = PD->getType(); 16077 } 16078 16079 if (LHSType.isNull()) 16080 LHSType = LHS->getType(); 16081 16082 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16083 16084 if (LT == Qualifiers::OCL_Weak) { 16085 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16086 getCurFunction()->markSafeWeakUse(LHS); 16087 } 16088 16089 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16090 return; 16091 16092 // FIXME. Check for other life times. 16093 if (LT != Qualifiers::OCL_None) 16094 return; 16095 16096 if (PRE) { 16097 if (PRE->isImplicitProperty()) 16098 return; 16099 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16100 if (!PD) 16101 return; 16102 16103 unsigned Attributes = PD->getPropertyAttributes(); 16104 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16105 // when 'assign' attribute was not explicitly specified 16106 // by user, ignore it and rely on property type itself 16107 // for lifetime info. 16108 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16109 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16110 LHSType->isObjCRetainableType()) 16111 return; 16112 16113 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16114 if (cast->getCastKind() == CK_ARCConsumeObject) { 16115 Diag(Loc, diag::warn_arc_retained_property_assign) 16116 << RHS->getSourceRange(); 16117 return; 16118 } 16119 RHS = cast->getSubExpr(); 16120 } 16121 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16122 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16123 return; 16124 } 16125 } 16126 } 16127 16128 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16129 16130 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16131 SourceLocation StmtLoc, 16132 const NullStmt *Body) { 16133 // Do not warn if the body is a macro that expands to nothing, e.g: 16134 // 16135 // #define CALL(x) 16136 // if (condition) 16137 // CALL(0); 16138 if (Body->hasLeadingEmptyMacro()) 16139 return false; 16140 16141 // Get line numbers of statement and body. 
16142 bool StmtLineInvalid; 16143 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16144 &StmtLineInvalid); 16145 if (StmtLineInvalid) 16146 return false; 16147 16148 bool BodyLineInvalid; 16149 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16150 &BodyLineInvalid); 16151 if (BodyLineInvalid) 16152 return false; 16153 16154 // Warn if null statement and body are on the same line. 16155 if (StmtLine != BodyLine) 16156 return false; 16157 16158 return true; 16159 } 16160 16161 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16162 const Stmt *Body, 16163 unsigned DiagID) { 16164 // Since this is a syntactic check, don't emit diagnostic for template 16165 // instantiations, this just adds noise. 16166 if (CurrentInstantiationScope) 16167 return; 16168 16169 // The body should be a null statement. 16170 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16171 if (!NBody) 16172 return; 16173 16174 // Do the usual checks. 16175 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16176 return; 16177 16178 Diag(NBody->getSemiLoc(), DiagID); 16179 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16180 } 16181 16182 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16183 const Stmt *PossibleBody) { 16184 assert(!CurrentInstantiationScope); // Ensured by caller 16185 16186 SourceLocation StmtLoc; 16187 const Stmt *Body; 16188 unsigned DiagID; 16189 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16190 StmtLoc = FS->getRParenLoc(); 16191 Body = FS->getBody(); 16192 DiagID = diag::warn_empty_for_body; 16193 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16194 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 16195 Body = WS->getBody(); 16196 DiagID = diag::warn_empty_while_body; 16197 } else 16198 return; // Neither `for' nor `while'. 16199 16200 // The body should be a null statement. 16201 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16202 if (!NBody) 16203 return; 16204 16205 // Skip expensive checks if diagnostic is disabled. 16206 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16207 return; 16208 16209 // Do the usual checks. 16210 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16211 return; 16212 16213 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16214 // noise level low, emit diagnostics only if for/while is followed by a 16215 // CompoundStmt, e.g.: 16216 // for (int i = 0; i < n; i++); 16217 // { 16218 // a(i); 16219 // } 16220 // or if for/while is followed by a statement with more indentation 16221 // than for/while itself: 16222 // for (int i = 0; i < n; i++); 16223 // a(i); 16224 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16225 if (!ProbableTypo) { 16226 bool BodyColInvalid; 16227 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16228 PossibleBody->getBeginLoc(), &BodyColInvalid); 16229 if (BodyColInvalid) 16230 return; 16231 16232 bool StmtColInvalid; 16233 unsigned StmtCol = 16234 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16235 if (StmtColInvalid) 16236 return; 16237 16238 if (BodyCol > StmtCol) 16239 ProbableTypo = true; 16240 } 16241 16242 if (ProbableTypo) { 16243 Diag(NBody->getSemiLoc(), DiagID); 16244 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16245 } 16246 } 16247 16248 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16249 16250 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
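/// For example (illustrative):
///   v = std::move(v);                   // warns: moving a value to itself
///   obj.field = std::move(obj.field);   // also diagnosed via member chains
/// Only assignments whose two sides refer to the same declaration (or the
/// same member chain rooted at the same object or 'this') are reported.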
16251 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 16252 SourceLocation OpLoc) { 16253 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 16254 return; 16255 16256 if (inTemplateInstantiation()) 16257 return; 16258 16259 // Strip parens and casts away. 16260 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 16261 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 16262 16263 // Check for a call expression 16264 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 16265 if (!CE || CE->getNumArgs() != 1) 16266 return; 16267 16268 // Check for a call to std::move 16269 if (!CE->isCallToStdMove()) 16270 return; 16271 16272 // Get argument from std::move 16273 RHSExpr = CE->getArg(0); 16274 16275 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 16276 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 16277 16278 // Two DeclRefExpr's, check that the decls are the same. 16279 if (LHSDeclRef && RHSDeclRef) { 16280 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16281 return; 16282 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16283 RHSDeclRef->getDecl()->getCanonicalDecl()) 16284 return; 16285 16286 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16287 << LHSExpr->getSourceRange() 16288 << RHSExpr->getSourceRange(); 16289 return; 16290 } 16291 16292 // Member variables require a different approach to check for self moves. 16293 // MemberExpr's are the same if every nested MemberExpr refers to the same 16294 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 16295 // the base Expr's are CXXThisExpr's. 16296 const Expr *LHSBase = LHSExpr; 16297 const Expr *RHSBase = RHSExpr; 16298 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 16299 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 16300 if (!LHSME || !RHSME) 16301 return; 16302 16303 while (LHSME && RHSME) { 16304 if (LHSME->getMemberDecl()->getCanonicalDecl() != 16305 RHSME->getMemberDecl()->getCanonicalDecl()) 16306 return; 16307 16308 LHSBase = LHSME->getBase(); 16309 RHSBase = RHSME->getBase(); 16310 LHSME = dyn_cast<MemberExpr>(LHSBase); 16311 RHSME = dyn_cast<MemberExpr>(RHSBase); 16312 } 16313 16314 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16315 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16316 if (LHSDeclRef && RHSDeclRef) { 16317 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16318 return; 16319 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16320 RHSDeclRef->getDecl()->getCanonicalDecl()) 16321 return; 16322 16323 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16324 << LHSExpr->getSourceRange() 16325 << RHSExpr->getSourceRange(); 16326 return; 16327 } 16328 16329 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16330 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16331 << LHSExpr->getSourceRange() 16332 << RHSExpr->getSourceRange(); 16333 } 16334 16335 //===--- Layout compatibility ----------------------------------------------// 16336 16337 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16338 16339 /// Check if two enumeration types are layout-compatible. 16340 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16341 // C++11 [dcl.enum] p8: 16342 // Two enumeration types are layout-compatible if they have the same 16343 // underlying type. 16344 return ED1->isComplete() && ED2->isComplete() && 16345 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16346 } 16347 16348 /// Check if two fields are layout-compatible. 
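/// For example (illustrative), `int a : 3;` and `int b : 3;` are
/// layout-compatible, while `int a : 3;` and `int b : 4;` are not: both
/// fields must agree on whether they are bit-fields and, if so, on the
/// bit-field width, in addition to having layout-compatible types.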
16349 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16350 FieldDecl *Field2) { 16351 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16352 return false; 16353 16354 if (Field1->isBitField() != Field2->isBitField()) 16355 return false; 16356 16357 if (Field1->isBitField()) { 16358 // Make sure that the bit-fields are the same length. 16359 unsigned Bits1 = Field1->getBitWidthValue(C); 16360 unsigned Bits2 = Field2->getBitWidthValue(C); 16361 16362 if (Bits1 != Bits2) 16363 return false; 16364 } 16365 16366 return true; 16367 } 16368 16369 /// Check if two standard-layout structs are layout-compatible. 16370 /// (C++11 [class.mem] p17) 16371 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16372 RecordDecl *RD2) { 16373 // If both records are C++ classes, check that base classes match. 16374 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16375 // If one of records is a CXXRecordDecl we are in C++ mode, 16376 // thus the other one is a CXXRecordDecl, too. 16377 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16378 // Check number of base classes. 16379 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16380 return false; 16381 16382 // Check the base classes. 16383 for (CXXRecordDecl::base_class_const_iterator 16384 Base1 = D1CXX->bases_begin(), 16385 BaseEnd1 = D1CXX->bases_end(), 16386 Base2 = D2CXX->bases_begin(); 16387 Base1 != BaseEnd1; 16388 ++Base1, ++Base2) { 16389 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16390 return false; 16391 } 16392 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16393 // If only RD2 is a C++ class, it should have zero base classes. 16394 if (D2CXX->getNumBases() > 0) 16395 return false; 16396 } 16397 16398 // Check the fields. 16399 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16400 Field2End = RD2->field_end(), 16401 Field1 = RD1->field_begin(), 16402 Field1End = RD1->field_end(); 16403 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16404 if (!isLayoutCompatible(C, *Field1, *Field2)) 16405 return false; 16406 } 16407 if (Field1 != Field1End || Field2 != Field2End) 16408 return false; 16409 16410 return true; 16411 } 16412 16413 /// Check if two standard-layout unions are layout-compatible. 16414 /// (C++11 [class.mem] p18) 16415 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16416 RecordDecl *RD2) { 16417 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16418 for (auto *Field2 : RD2->fields()) 16419 UnmatchedFields.insert(Field2); 16420 16421 for (auto *Field1 : RD1->fields()) { 16422 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16423 I = UnmatchedFields.begin(), 16424 E = UnmatchedFields.end(); 16425 16426 for ( ; I != E; ++I) { 16427 if (isLayoutCompatible(C, Field1, *I)) { 16428 bool Result = UnmatchedFields.erase(*I); 16429 (void) Result; 16430 assert(Result); 16431 break; 16432 } 16433 } 16434 if (I == E) 16435 return false; 16436 } 16437 16438 return UnmatchedFields.empty(); 16439 } 16440 16441 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16442 RecordDecl *RD2) { 16443 if (RD1->isUnion() != RD2->isUnion()) 16444 return false; 16445 16446 if (RD1->isUnion()) 16447 return isLayoutCompatibleUnion(C, RD1, RD2); 16448 else 16449 return isLayoutCompatibleStruct(C, RD1, RD2); 16450 } 16451 16452 /// Check if two types are layout-compatible in C++11 sense. 
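/// For example (illustrative), two enumerations with the same underlying
/// type are layout-compatible, as are two standard-layout structs whose
/// corresponding members have layout-compatible types even if the tag and
/// member names differ; qualifiers are ignored because the comparison is
/// performed on canonical unqualified types.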
16453 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16454 if (T1.isNull() || T2.isNull()) 16455 return false; 16456 16457 // C++11 [basic.types] p11: 16458 // If two types T1 and T2 are the same type, then T1 and T2 are 16459 // layout-compatible types. 16460 if (C.hasSameType(T1, T2)) 16461 return true; 16462 16463 T1 = T1.getCanonicalType().getUnqualifiedType(); 16464 T2 = T2.getCanonicalType().getUnqualifiedType(); 16465 16466 const Type::TypeClass TC1 = T1->getTypeClass(); 16467 const Type::TypeClass TC2 = T2->getTypeClass(); 16468 16469 if (TC1 != TC2) 16470 return false; 16471 16472 if (TC1 == Type::Enum) { 16473 return isLayoutCompatible(C, 16474 cast<EnumType>(T1)->getDecl(), 16475 cast<EnumType>(T2)->getDecl()); 16476 } else if (TC1 == Type::Record) { 16477 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16478 return false; 16479 16480 return isLayoutCompatible(C, 16481 cast<RecordType>(T1)->getDecl(), 16482 cast<RecordType>(T2)->getDecl()); 16483 } 16484 16485 return false; 16486 } 16487 16488 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16489 16490 /// Given a type tag expression find the type tag itself. 16491 /// 16492 /// \param TypeExpr Type tag expression, as it appears in user's code. 16493 /// 16494 /// \param VD Declaration of an identifier that appears in a type tag. 16495 /// 16496 /// \param MagicValue Type tag magic value. 16497 /// 16498 /// \param isConstantEvaluated whether the evalaution should be performed in 16499 16500 /// constant context. 16501 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16502 const ValueDecl **VD, uint64_t *MagicValue, 16503 bool isConstantEvaluated) { 16504 while(true) { 16505 if (!TypeExpr) 16506 return false; 16507 16508 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16509 16510 switch (TypeExpr->getStmtClass()) { 16511 case Stmt::UnaryOperatorClass: { 16512 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16513 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16514 TypeExpr = UO->getSubExpr(); 16515 continue; 16516 } 16517 return false; 16518 } 16519 16520 case Stmt::DeclRefExprClass: { 16521 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16522 *VD = DRE->getDecl(); 16523 return true; 16524 } 16525 16526 case Stmt::IntegerLiteralClass: { 16527 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16528 llvm::APInt MagicValueAPInt = IL->getValue(); 16529 if (MagicValueAPInt.getActiveBits() <= 64) { 16530 *MagicValue = MagicValueAPInt.getZExtValue(); 16531 return true; 16532 } else 16533 return false; 16534 } 16535 16536 case Stmt::BinaryConditionalOperatorClass: 16537 case Stmt::ConditionalOperatorClass: { 16538 const AbstractConditionalOperator *ACO = 16539 cast<AbstractConditionalOperator>(TypeExpr); 16540 bool Result; 16541 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16542 isConstantEvaluated)) { 16543 if (Result) 16544 TypeExpr = ACO->getTrueExpr(); 16545 else 16546 TypeExpr = ACO->getFalseExpr(); 16547 continue; 16548 } 16549 return false; 16550 } 16551 16552 case Stmt::BinaryOperatorClass: { 16553 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16554 if (BO->getOpcode() == BO_Comma) { 16555 TypeExpr = BO->getRHS(); 16556 continue; 16557 } 16558 return false; 16559 } 16560 16561 default: 16562 return false; 16563 } 16564 } 16565 } 16566 16567 /// Retrieve the C type corresponding to type tag TypeExpr. 
16568 /// 16569 /// \param TypeExpr Expression that specifies a type tag. 16570 /// 16571 /// \param MagicValues Registered magic values. 16572 /// 16573 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 16574 /// kind. 16575 /// 16576 /// \param TypeInfo Information about the corresponding C type. 16577 /// 16578 /// \param isConstantEvaluated whether the evalaution should be performed in 16579 /// constant context. 16580 /// 16581 /// \returns true if the corresponding C type was found. 16582 static bool GetMatchingCType( 16583 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 16584 const ASTContext &Ctx, 16585 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 16586 *MagicValues, 16587 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 16588 bool isConstantEvaluated) { 16589 FoundWrongKind = false; 16590 16591 // Variable declaration that has type_tag_for_datatype attribute. 16592 const ValueDecl *VD = nullptr; 16593 16594 uint64_t MagicValue; 16595 16596 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 16597 return false; 16598 16599 if (VD) { 16600 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 16601 if (I->getArgumentKind() != ArgumentKind) { 16602 FoundWrongKind = true; 16603 return false; 16604 } 16605 TypeInfo.Type = I->getMatchingCType(); 16606 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 16607 TypeInfo.MustBeNull = I->getMustBeNull(); 16608 return true; 16609 } 16610 return false; 16611 } 16612 16613 if (!MagicValues) 16614 return false; 16615 16616 llvm::DenseMap<Sema::TypeTagMagicValue, 16617 Sema::TypeTagData>::const_iterator I = 16618 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 16619 if (I == MagicValues->end()) 16620 return false; 16621 16622 TypeInfo = I->second; 16623 return true; 16624 } 16625 16626 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 16627 uint64_t MagicValue, QualType Type, 16628 bool LayoutCompatible, 16629 bool MustBeNull) { 16630 if (!TypeTagForDatatypeMagicValues) 16631 TypeTagForDatatypeMagicValues.reset( 16632 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 16633 16634 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 16635 (*TypeTagForDatatypeMagicValues)[Magic] = 16636 TypeTagData(Type, LayoutCompatible, MustBeNull); 16637 } 16638 16639 static bool IsSameCharType(QualType T1, QualType T2) { 16640 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 16641 if (!BT1) 16642 return false; 16643 16644 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 16645 if (!BT2) 16646 return false; 16647 16648 BuiltinType::Kind T1Kind = BT1->getKind(); 16649 BuiltinType::Kind T2Kind = BT2->getKind(); 16650 16651 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 16652 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 16653 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 16654 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 16655 } 16656 16657 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 16658 const ArrayRef<const Expr *> ExprArgs, 16659 SourceLocation CallSiteLoc) { 16660 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 16661 bool IsPointerAttr = Attr->getIsPointer(); 16662 16663 // Retrieve the argument representing the 'type_tag'. 
16664 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 16665 if (TypeTagIdxAST >= ExprArgs.size()) { 16666 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16667 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 16668 return; 16669 } 16670 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 16671 bool FoundWrongKind; 16672 TypeTagData TypeInfo; 16673 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 16674 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 16675 TypeInfo, isConstantEvaluated())) { 16676 if (FoundWrongKind) 16677 Diag(TypeTagExpr->getExprLoc(), 16678 diag::warn_type_tag_for_datatype_wrong_kind) 16679 << TypeTagExpr->getSourceRange(); 16680 return; 16681 } 16682 16683 // Retrieve the argument representing the 'arg_idx'. 16684 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 16685 if (ArgumentIdxAST >= ExprArgs.size()) { 16686 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16687 << 1 << Attr->getArgumentIdx().getSourceIndex(); 16688 return; 16689 } 16690 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 16691 if (IsPointerAttr) { 16692 // Skip implicit cast of pointer to `void *' (as a function argument). 16693 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 16694 if (ICE->getType()->isVoidPointerType() && 16695 ICE->getCastKind() == CK_BitCast) 16696 ArgumentExpr = ICE->getSubExpr(); 16697 } 16698 QualType ArgumentType = ArgumentExpr->getType(); 16699 16700 // Passing a `void*' pointer shouldn't trigger a warning. 16701 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 16702 return; 16703 16704 if (TypeInfo.MustBeNull) { 16705 // Type tag with matching void type requires a null pointer. 16706 if (!ArgumentExpr->isNullPointerConstant(Context, 16707 Expr::NPC_ValueDependentIsNotNull)) { 16708 Diag(ArgumentExpr->getExprLoc(), 16709 diag::warn_type_safety_null_pointer_required) 16710 << ArgumentKind->getName() 16711 << ArgumentExpr->getSourceRange() 16712 << TypeTagExpr->getSourceRange(); 16713 } 16714 return; 16715 } 16716 16717 QualType RequiredType = TypeInfo.Type; 16718 if (IsPointerAttr) 16719 RequiredType = Context.getPointerType(RequiredType); 16720 16721 bool mismatch = false; 16722 if (!TypeInfo.LayoutCompatible) { 16723 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 16724 16725 // C++11 [basic.fundamental] p1: 16726 // Plain char, signed char, and unsigned char are three distinct types. 16727 // 16728 // But we treat plain `char' as equivalent to `signed char' or `unsigned 16729 // char' depending on the current char signedness mode. 
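// For example (illustrative), if a type tag was registered for `char *`,
// passing a `signed char *` argument on a target where plain `char` is
// signed (or an `unsigned char *` argument where it is unsigned) is not
// treated as a mismatch.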
16730 if (mismatch) 16731 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 16732 RequiredType->getPointeeType())) || 16733 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 16734 mismatch = false; 16735 } else 16736 if (IsPointerAttr) 16737 mismatch = !isLayoutCompatible(Context, 16738 ArgumentType->getPointeeType(), 16739 RequiredType->getPointeeType()); 16740 else 16741 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 16742 16743 if (mismatch) 16744 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 16745 << ArgumentType << ArgumentKind 16746 << TypeInfo.LayoutCompatible << RequiredType 16747 << ArgumentExpr->getSourceRange() 16748 << TypeTagExpr->getSourceRange(); 16749 } 16750 16751 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 16752 CharUnits Alignment) { 16753 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 16754 } 16755 16756 void Sema::DiagnoseMisalignedMembers() { 16757 for (MisalignedMember &m : MisalignedMembers) { 16758 const NamedDecl *ND = m.RD; 16759 if (ND->getName().empty()) { 16760 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 16761 ND = TD; 16762 } 16763 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 16764 << m.MD << ND << m.E->getSourceRange(); 16765 } 16766 MisalignedMembers.clear(); 16767 } 16768 16769 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 16770 E = E->IgnoreParens(); 16771 if (!T->isPointerType() && !T->isIntegerType()) 16772 return; 16773 if (isa<UnaryOperator>(E) && 16774 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 16775 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 16776 if (isa<MemberExpr>(Op)) { 16777 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 16778 if (MA != MisalignedMembers.end() && 16779 (T->isIntegerType() || 16780 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 16781 Context.getTypeAlignInChars( 16782 T->getPointeeType()) <= MA->Alignment)))) 16783 MisalignedMembers.erase(MA); 16784 } 16785 } 16786 } 16787 16788 void Sema::RefersToMemberWithReducedAlignment( 16789 Expr *E, 16790 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 16791 Action) { 16792 const auto *ME = dyn_cast<MemberExpr>(E); 16793 if (!ME) 16794 return; 16795 16796 // No need to check expressions with an __unaligned-qualified type. 16797 if (E->getType().getQualifiers().hasUnaligned()) 16798 return; 16799 16800 // For a chain of MemberExpr like "a.b.c.d" this list 16801 // will keep FieldDecl's like [d, c, b]. 16802 SmallVector<FieldDecl *, 4> ReverseMemberChain; 16803 const MemberExpr *TopME = nullptr; 16804 bool AnyIsPacked = false; 16805 do { 16806 QualType BaseType = ME->getBase()->getType(); 16807 if (BaseType->isDependentType()) 16808 return; 16809 if (ME->isArrow()) 16810 BaseType = BaseType->getPointeeType(); 16811 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 16812 if (RD->isInvalidDecl()) 16813 return; 16814 16815 ValueDecl *MD = ME->getMemberDecl(); 16816 auto *FD = dyn_cast<FieldDecl>(MD); 16817 // We do not care about non-data members. 
16818 if (!FD || FD->isInvalidDecl())
16819 return;
16820
16821 AnyIsPacked =
16822 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
16823 ReverseMemberChain.push_back(FD);
16824
16825 TopME = ME;
16826 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
16827 } while (ME);
16828 assert(TopME && "We did not compute a topmost MemberExpr!");
16829
16830 // Not in the scope of this diagnostic.
16831 if (!AnyIsPacked)
16832 return;
16833
16834 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
16835 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
16836 // TODO: The innermost base of the member expression may be too complicated.
16837 // For now, just disregard these cases. This is left for future
16838 // improvement.
16839 if (!DRE && !isa<CXXThisExpr>(TopBase))
16840 return;
16841
16842 // Alignment expected by the whole expression.
16843 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
16844
16845 // No need to do anything else with this case.
16846 if (ExpectedAlignment.isOne())
16847 return;
16848
16849 // Synthesize offset of the whole access.
16850 CharUnits Offset;
16851 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
16852 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));
16853
16854 // Compute the CompleteObjectAlignment as the alignment of the whole chain.
16855 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
16856 ReverseMemberChain.back()->getParent()->getTypeForDecl());
16857
16858 // The base expression of the innermost MemberExpr may give
16859 // stronger guarantees than the class containing the member.
16860 if (DRE && !TopME->isArrow()) {
16861 const ValueDecl *VD = DRE->getDecl();
16862 if (!VD->getType()->isReferenceType())
16863 CompleteObjectAlignment =
16864 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
16865 }
16866
16867 // Check if the synthesized offset fulfills the alignment.
16868 if (Offset % ExpectedAlignment != 0 ||
16869 // It may fulfill the offset, but the effective alignment may still be
16870 // lower than the expected expression alignment.
16871 CompleteObjectAlignment < ExpectedAlignment) {
16872 // If this happens, we want to determine a sensible culprit for it.
16873 // Intuitively, walking the chain of member expressions from right to
16874 // left, we start with the alignment required by the field type, but some
16875 // packed attribute in that chain may have reduced the alignment. Another
16876 // packed structure may increase it again, but if we got here, any such
16877 // increase was not enough. So it seems reasonable to point at the first
16878 // FieldDecl that either is packed itself or whose enclosing RecordDecl
16879 // is.
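// For example (hypothetical):
//
//   struct __attribute__((packed)) Inner { char c; int i; };
//   struct Outer { char c; struct Inner in; };
//   int *f(struct Outer *o) { return &o->in.i; }
//
// Here 'i' is the first FieldDecl in the chain whose parent is packed, so it
// is the one reported, with the reduced (one-byte) alignment.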
16880 FieldDecl *FD = nullptr; 16881 CharUnits Alignment; 16882 for (FieldDecl *FDI : ReverseMemberChain) { 16883 if (FDI->hasAttr<PackedAttr>() || 16884 FDI->getParent()->hasAttr<PackedAttr>()) { 16885 FD = FDI; 16886 Alignment = std::min( 16887 Context.getTypeAlignInChars(FD->getType()), 16888 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 16889 break; 16890 } 16891 } 16892 assert(FD && "We did not find a packed FieldDecl!"); 16893 Action(E, FD->getParent(), FD, Alignment); 16894 } 16895 } 16896 16897 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 16898 using namespace std::placeholders; 16899 16900 RefersToMemberWithReducedAlignment( 16901 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 16902 _2, _3, _4)); 16903 } 16904 16905 // Check if \p Ty is a valid type for the elementwise math builtins. If it is 16906 // not a valid type, emit an error message and return true. Otherwise return 16907 // false. 16908 static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc, 16909 QualType Ty) { 16910 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) { 16911 S.Diag(Loc, diag::err_builtin_invalid_arg_type) 16912 << 1 << /* vector, integer or float ty*/ 0 << Ty; 16913 return true; 16914 } 16915 return false; 16916 } 16917 16918 bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 16919 if (checkArgCount(*this, TheCall, 1)) 16920 return true; 16921 16922 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 16923 if (A.isInvalid()) 16924 return true; 16925 16926 TheCall->setArg(0, A.get()); 16927 QualType TyA = A.get()->getType(); 16928 16929 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 16930 return true; 16931 16932 TheCall->setType(TyA); 16933 return false; 16934 } 16935 16936 bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 16937 if (checkArgCount(*this, TheCall, 2)) 16938 return true; 16939 16940 ExprResult A = TheCall->getArg(0); 16941 ExprResult B = TheCall->getArg(1); 16942 // Do standard promotions between the two arguments, returning their common 16943 // type. 
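// For illustration (hypothetical user code, assuming an ext_vector_type
// operand type):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 r = __builtin_elementwise_max(a, b);
//
// Both operands must converge on the same type after the conversions below.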
16944 QualType Res = 16945 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 16946 if (A.isInvalid() || B.isInvalid()) 16947 return true; 16948 16949 QualType TyA = A.get()->getType(); 16950 QualType TyB = B.get()->getType(); 16951 16952 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 16953 return Diag(A.get()->getBeginLoc(), 16954 diag::err_typecheck_call_different_arg_types) 16955 << TyA << TyB; 16956 16957 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 16958 return true; 16959 16960 TheCall->setArg(0, A.get()); 16961 TheCall->setArg(1, B.get()); 16962 TheCall->setType(Res); 16963 return false; 16964 } 16965 16966 bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 16967 if (checkArgCount(*this, TheCall, 1)) 16968 return true; 16969 16970 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 16971 if (A.isInvalid()) 16972 return true; 16973 16974 TheCall->setArg(0, A.get()); 16975 return false; 16976 } 16977 16978 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 16979 ExprResult CallResult) { 16980 if (checkArgCount(*this, TheCall, 1)) 16981 return ExprError(); 16982 16983 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 16984 if (MatrixArg.isInvalid()) 16985 return MatrixArg; 16986 Expr *Matrix = MatrixArg.get(); 16987 16988 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 16989 if (!MType) { 16990 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 16991 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 16992 return ExprError(); 16993 } 16994 16995 // Create returned matrix type by swapping rows and columns of the argument 16996 // matrix type. 16997 QualType ResultType = Context.getConstantMatrixType( 16998 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 16999 17000 // Change the return type to the type of the returned matrix. 17001 TheCall->setType(ResultType); 17002 17003 // Update call argument to use the possibly converted matrix argument. 17004 TheCall->setArg(0, Matrix); 17005 return CallResult; 17006 } 17007 17008 // Get and verify the matrix dimensions. 17009 static llvm::Optional<unsigned> 17010 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17011 SourceLocation ErrorPos; 17012 Optional<llvm::APSInt> Value = 17013 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17014 if (!Value) { 17015 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17016 << Name; 17017 return {}; 17018 } 17019 uint64_t Dim = Value->getZExtValue(); 17020 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17021 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17022 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17023 return {}; 17024 } 17025 return Dim; 17026 } 17027 17028 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17029 ExprResult CallResult) { 17030 if (!getLangOpts().MatrixTypes) { 17031 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17032 return ExprError(); 17033 } 17034 17035 if (checkArgCount(*this, TheCall, 4)) 17036 return ExprError(); 17037 17038 unsigned PtrArgIdx = 0; 17039 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17040 Expr *RowsExpr = TheCall->getArg(1); 17041 Expr *ColumnsExpr = TheCall->getArg(2); 17042 Expr *StrideExpr = TheCall->getArg(3); 17043 17044 bool ArgError = false; 17045 17046 // Check pointer argument. 
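// (Illustrative call shape for this builtin:
//    __builtin_matrix_column_major_load(Ptr, Rows, Cols, Stride)
//  The pointee type of Ptr determines the element type of the resulting
//  matrix.)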
17047 {
17048 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
17049 if (PtrConv.isInvalid())
17050 return PtrConv;
17051 PtrExpr = PtrConv.get();
17052 TheCall->setArg(0, PtrExpr);
17053 if (PtrExpr->isTypeDependent()) {
17054 TheCall->setType(Context.DependentTy);
17055 return TheCall;
17056 }
17057 }
17058
17059 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
17060 QualType ElementTy;
17061 if (!PtrTy) {
17062 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
17063 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
17064 ArgError = true;
17065 } else {
17066 ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
17067
17068 if (!ConstantMatrixType::isValidElementType(ElementTy)) {
17069 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
17070 << PtrArgIdx + 1 << /* pointer to element ty*/ 2
17071 << PtrExpr->getType();
17072 ArgError = true;
17073 }
17074 }
17075
17076 // Apply default Lvalue conversions and convert the expression to size_t.
17077 auto ApplyArgumentConversions = [this](Expr *E) {
17078 ExprResult Conv = DefaultLvalueConversion(E);
17079 if (Conv.isInvalid())
17080 return Conv;
17081
17082 return tryConvertExprToType(Conv.get(), Context.getSizeType());
17083 };
17084
17085 // Apply conversion to row and column expressions.
17086 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
17087 if (!RowsConv.isInvalid()) {
17088 RowsExpr = RowsConv.get();
17089 TheCall->setArg(1, RowsExpr);
17090 } else
17091 RowsExpr = nullptr;
17092
17093 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
17094 if (!ColumnsConv.isInvalid()) {
17095 ColumnsExpr = ColumnsConv.get();
17096 TheCall->setArg(2, ColumnsExpr);
17097 } else
17098 ColumnsExpr = nullptr;
17099
17100 // If any part of the result matrix type is still pending, just use
17101 // Context.DependentTy until all parts are resolved.
17102 if ((RowsExpr && RowsExpr->isTypeDependent()) ||
17103 (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
17104 TheCall->setType(Context.DependentTy);
17105 return CallResult;
17106 }
17107
17108 // Check row and column dimensions.
17109 llvm::Optional<unsigned> MaybeRows;
17110 if (RowsExpr)
17111 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
17112
17113 llvm::Optional<unsigned> MaybeColumns;
17114 if (ColumnsExpr)
17115 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
17116
17117 // Check stride argument.
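// (Illustrative: for a hypothetical call
//    __builtin_matrix_column_major_load(Ptr, 3, 4, Stride)
//  a constant Stride smaller than the row count 3 is diagnosed below.)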
17118 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 17119 if (StrideConv.isInvalid()) 17120 return ExprError(); 17121 StrideExpr = StrideConv.get(); 17122 TheCall->setArg(3, StrideExpr); 17123 17124 if (MaybeRows) { 17125 if (Optional<llvm::APSInt> Value = 17126 StrideExpr->getIntegerConstantExpr(Context)) { 17127 uint64_t Stride = Value->getZExtValue(); 17128 if (Stride < *MaybeRows) { 17129 Diag(StrideExpr->getBeginLoc(), 17130 diag::err_builtin_matrix_stride_too_small); 17131 ArgError = true; 17132 } 17133 } 17134 } 17135 17136 if (ArgError || !MaybeRows || !MaybeColumns) 17137 return ExprError(); 17138 17139 TheCall->setType( 17140 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 17141 return CallResult; 17142 } 17143 17144 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 17145 ExprResult CallResult) { 17146 if (checkArgCount(*this, TheCall, 3)) 17147 return ExprError(); 17148 17149 unsigned PtrArgIdx = 1; 17150 Expr *MatrixExpr = TheCall->getArg(0); 17151 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17152 Expr *StrideExpr = TheCall->getArg(2); 17153 17154 bool ArgError = false; 17155 17156 { 17157 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 17158 if (MatrixConv.isInvalid()) 17159 return MatrixConv; 17160 MatrixExpr = MatrixConv.get(); 17161 TheCall->setArg(0, MatrixExpr); 17162 } 17163 if (MatrixExpr->isTypeDependent()) { 17164 TheCall->setType(Context.DependentTy); 17165 return TheCall; 17166 } 17167 17168 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 17169 if (!MatrixTy) { 17170 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17171 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 17172 ArgError = true; 17173 } 17174 17175 { 17176 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17177 if (PtrConv.isInvalid()) 17178 return PtrConv; 17179 PtrExpr = PtrConv.get(); 17180 TheCall->setArg(1, PtrExpr); 17181 if (PtrExpr->isTypeDependent()) { 17182 TheCall->setType(Context.DependentTy); 17183 return TheCall; 17184 } 17185 } 17186 17187 // Check pointer argument. 17188 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17189 if (!PtrTy) { 17190 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17191 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17192 ArgError = true; 17193 } else { 17194 QualType ElementTy = PtrTy->getPointeeType(); 17195 if (ElementTy.isConstQualified()) { 17196 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 17197 ArgError = true; 17198 } 17199 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 17200 if (MatrixTy && 17201 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 17202 Diag(PtrExpr->getBeginLoc(), 17203 diag::err_builtin_matrix_pointer_arg_mismatch) 17204 << ElementTy << MatrixTy->getElementType(); 17205 ArgError = true; 17206 } 17207 } 17208 17209 // Apply default Lvalue conversions and convert the stride expression to 17210 // size_t. 17211 { 17212 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 17213 if (StrideConv.isInvalid()) 17214 return StrideConv; 17215 17216 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 17217 if (StrideConv.isInvalid()) 17218 return StrideConv; 17219 StrideExpr = StrideConv.get(); 17220 TheCall->setArg(2, StrideExpr); 17221 } 17222 17223 // Check stride argument. 
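// (Illustrative: for a hypothetical call
//    __builtin_matrix_column_major_store(M, Ptr, Stride)
//  a constant Stride smaller than M's row count is diagnosed below.)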
17224 if (MatrixTy) { 17225 if (Optional<llvm::APSInt> Value = 17226 StrideExpr->getIntegerConstantExpr(Context)) { 17227 uint64_t Stride = Value->getZExtValue(); 17228 if (Stride < MatrixTy->getNumRows()) { 17229 Diag(StrideExpr->getBeginLoc(), 17230 diag::err_builtin_matrix_stride_too_small); 17231 ArgError = true; 17232 } 17233 } 17234 } 17235 17236 if (ArgError) 17237 return ExprError(); 17238 17239 return CallResult; 17240 } 17241 17242 /// \brief Enforce the bounds of a TCB 17243 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 17244 /// directly calls other functions in the same TCB as marked by the enforce_tcb 17245 /// and enforce_tcb_leaf attributes. 17246 void Sema::CheckTCBEnforcement(const CallExpr *TheCall, 17247 const FunctionDecl *Callee) { 17248 const FunctionDecl *Caller = getCurFunctionDecl(); 17249 17250 // Calls to builtins are not enforced. 17251 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() || 17252 Callee->getBuiltinID() != 0) 17253 return; 17254 17255 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 17256 // all TCBs the callee is a part of. 17257 llvm::StringSet<> CalleeTCBs; 17258 for_each(Callee->specific_attrs<EnforceTCBAttr>(), 17259 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 17260 for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(), 17261 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 17262 17263 // Go through the TCBs the caller is a part of and emit warnings if Caller 17264 // is in a TCB that the Callee is not. 17265 for_each( 17266 Caller->specific_attrs<EnforceTCBAttr>(), 17267 [&](const auto *A) { 17268 StringRef CallerTCB = A->getTCBName(); 17269 if (CalleeTCBs.count(CallerTCB) == 0) { 17270 this->Diag(TheCall->getExprLoc(), 17271 diag::warn_tcb_enforcement_violation) << Callee 17272 << CallerTCB; 17273 } 17274 }); 17275 } 17276
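// For illustration of the check above (hypothetical functions, loosely
// following the enforce_tcb attribute documentation):
//
//   void in_tcb(void) __attribute__((enforce_tcb("x")));
//   void not_in_tcb(void);
//   void caller(void) __attribute__((enforce_tcb("x"))) {
//     in_tcb();     // OK: callee is also in TCB "x".
//     not_in_tcb(); // Warned: callee is not in TCB "x".
//   }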