1 //===- SemaChecking.cpp - Extra Semantic Checking -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements extra semantic analysis beyond what is enforced 10 // by the C type system. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "clang/AST/APValue.h" 15 #include "clang/AST/ASTContext.h" 16 #include "clang/AST/Attr.h" 17 #include "clang/AST/AttrIterator.h" 18 #include "clang/AST/CharUnits.h" 19 #include "clang/AST/Decl.h" 20 #include "clang/AST/DeclBase.h" 21 #include "clang/AST/DeclCXX.h" 22 #include "clang/AST/DeclObjC.h" 23 #include "clang/AST/DeclarationName.h" 24 #include "clang/AST/EvaluatedExprVisitor.h" 25 #include "clang/AST/Expr.h" 26 #include "clang/AST/ExprCXX.h" 27 #include "clang/AST/ExprObjC.h" 28 #include "clang/AST/ExprOpenMP.h" 29 #include "clang/AST/FormatString.h" 30 #include "clang/AST/NSAPI.h" 31 #include "clang/AST/NonTrivialTypeVisitor.h" 32 #include "clang/AST/OperationKinds.h" 33 #include "clang/AST/RecordLayout.h" 34 #include "clang/AST/Stmt.h" 35 #include "clang/AST/TemplateBase.h" 36 #include "clang/AST/Type.h" 37 #include "clang/AST/TypeLoc.h" 38 #include "clang/AST/UnresolvedSet.h" 39 #include "clang/Basic/AddressSpaces.h" 40 #include "clang/Basic/CharInfo.h" 41 #include "clang/Basic/Diagnostic.h" 42 #include "clang/Basic/IdentifierTable.h" 43 #include "clang/Basic/LLVM.h" 44 #include "clang/Basic/LangOptions.h" 45 #include "clang/Basic/OpenCLOptions.h" 46 #include "clang/Basic/OperatorKinds.h" 47 #include "clang/Basic/PartialDiagnostic.h" 48 #include "clang/Basic/SourceLocation.h" 49 #include "clang/Basic/SourceManager.h" 50 #include "clang/Basic/Specifiers.h" 51 #include 
"clang/Basic/SyncScope.h" 52 #include "clang/Basic/TargetBuiltins.h" 53 #include "clang/Basic/TargetCXXABI.h" 54 #include "clang/Basic/TargetInfo.h" 55 #include "clang/Basic/TypeTraits.h" 56 #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. 57 #include "clang/Sema/Initialization.h" 58 #include "clang/Sema/Lookup.h" 59 #include "clang/Sema/Ownership.h" 60 #include "clang/Sema/Scope.h" 61 #include "clang/Sema/ScopeInfo.h" 62 #include "clang/Sema/Sema.h" 63 #include "clang/Sema/SemaInternal.h" 64 #include "llvm/ADT/APFloat.h" 65 #include "llvm/ADT/APInt.h" 66 #include "llvm/ADT/APSInt.h" 67 #include "llvm/ADT/ArrayRef.h" 68 #include "llvm/ADT/DenseMap.h" 69 #include "llvm/ADT/FoldingSet.h" 70 #include "llvm/ADT/None.h" 71 #include "llvm/ADT/Optional.h" 72 #include "llvm/ADT/STLExtras.h" 73 #include "llvm/ADT/SmallBitVector.h" 74 #include "llvm/ADT/SmallPtrSet.h" 75 #include "llvm/ADT/SmallString.h" 76 #include "llvm/ADT/SmallVector.h" 77 #include "llvm/ADT/StringRef.h" 78 #include "llvm/ADT/StringSet.h" 79 #include "llvm/ADT/StringSwitch.h" 80 #include "llvm/ADT/Triple.h" 81 #include "llvm/Support/AtomicOrdering.h" 82 #include "llvm/Support/Casting.h" 83 #include "llvm/Support/Compiler.h" 84 #include "llvm/Support/ConvertUTF.h" 85 #include "llvm/Support/ErrorHandling.h" 86 #include "llvm/Support/Format.h" 87 #include "llvm/Support/Locale.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/SaveAndRestore.h" 90 #include "llvm/Support/raw_ostream.h" 91 #include <algorithm> 92 #include <bitset> 93 #include <cassert> 94 #include <cctype> 95 #include <cstddef> 96 #include <cstdint> 97 #include <functional> 98 #include <limits> 99 #include <string> 100 #include <tuple> 101 #include <utility> 102 103 using namespace clang; 104 using namespace sema; 105 106 SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL, 107 unsigned ByteNo) const { 108 return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts, 
109 Context.getTargetInfo()); 110 } 111 112 /// Checks that a call expression's argument count is the desired number. 113 /// This is useful when doing custom type-checking. Returns true on error. 114 static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) { 115 unsigned argCount = call->getNumArgs(); 116 if (argCount == desiredArgCount) return false; 117 118 if (argCount < desiredArgCount) 119 return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args) 120 << 0 /*function call*/ << desiredArgCount << argCount 121 << call->getSourceRange(); 122 123 // Highlight all the excess arguments. 124 SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(), 125 call->getArg(argCount - 1)->getEndLoc()); 126 127 return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args) 128 << 0 /*function call*/ << desiredArgCount << argCount 129 << call->getArg(1)->getSourceRange(); 130 } 131 132 /// Check that the first argument to __builtin_annotation is an integer 133 /// and the second argument is a non-wide string literal. 134 static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 135 if (checkArgCount(S, TheCall, 2)) 136 return true; 137 138 // First argument should be an integer. 139 Expr *ValArg = TheCall->getArg(0); 140 QualType Ty = ValArg->getType(); 141 if (!Ty->isIntegerType()) { 142 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 143 << ValArg->getSourceRange(); 144 return true; 145 } 146 147 // Second argument should be a constant string. 148 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 149 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 150 if (!Literal || !Literal->isAscii()) { 151 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 152 << StrArg->getSourceRange(); 153 return true; 154 } 155 156 TheCall->setType(Ty); 157 return false; 158 } 159 160 static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 161 // We need at least one argument. 
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  // CheckAddressOfOperand performs the same validation as the builtin '&'
  // operator and returns a null type on failure (it has already diagnosed).
  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  // CheckAddressOfOperand may have rewritten the operand; store it back.
  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check that the argument to __builtin_function_start is a function.
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  // The argument must be a constant reference to a function declaration.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  // Taking the address must be permitted (e.g. not a consteval function).
  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  // The builtin is transparent: it yields its argument's type.
  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  // __builtin_is_aligned returns bool; the up/down variants return the
  // (possibly adjusted) value itself.
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Plain (non-enum, non-bool) integer types only.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  // Largest representable alignment is the sign bit of the value's type.
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    // MaxValue = 2^MaxAlignmentBits, the largest power of two that fits.
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    // Alignment of 1 is a no-op for align_up/align_down and trivially true
    // for is_aligned, so warn about the useless call.
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  // Convert both arguments as if binding them to by-value parameters.
  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

/// Type-check the __builtin_{add,sub,mul}_overflow family: two integer
/// operands and a pointer to a non-const integer result.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy || !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

/// Type-check __builtin_call_with_static_chain(call, chain): the first
/// argument must be a plain (non-block, non-builtin, non-pseudo-destructor)
/// function call, the second a pointer. On success the CallExpr is rewritten
/// in place to forward the inner call's type/value/object kind.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  // Builtins have no real static chain to pass.
  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin callee a function-pointer type matching the signature
  // '(ReturnTy)(ReturnTy, ChainTy)' so codegen sees a well-typed call.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  // The builtin call behaves exactly like the wrapped call expression.
  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

/// Walks a scanf format string and diagnoses %s/%c/scanlist conversions whose
/// field width (plus nul terminator where applicable) exceeds the size of the
/// corresponding destination buffer.
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<Optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size).
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    // %s and scanlists append a terminating nul; %c does not.
    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    // Without a constant field width the write size is unbounded/unknown.
    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    Optional<llvm::APSInt> DestSizeAPS = ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    // Keep parsing the rest of the format string.
    return true;
  }
};

/// Computes a lower bound on the number of bytes a printf-style call will
/// write (including the trailing nul), by walking the format string.
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // A '+' or ' ' flag adds exactly one sign/space character.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // The constructor counted the literal format text; the specifier itself
    // is not emitted, so remove its length from the running total.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  // Returns the constant field width, or 0 when unspecified/non-constant.
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  // Returns the effective precision: the constant amount if given, else the
  // specifier's documented default.
  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

/// Diagnose _FORTIFY_SOURCE-style overflows for calls to fortifiable memory
/// and string builtins (memcpy, strcpy, sprintf, scanf, the __*_chk variants,
/// ...), by statically comparing the source size against the destination
/// object size where both can be computed.
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  // Dependent or constant-evaluated calls can't be checked meaningfully here.
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  // A diagnose_as_builtin attribute redirects the check to another builtin,
  // with its own argument-index mapping.
  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
    assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
    UseDABAttr = true;
  }

  unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);

  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto TranslateIndex = [&](unsigned Index) -> Optional<unsigned> {
    // If we refer to a diagnose_as_builtin attribute, we need to change the
    // argument index to refer to the arguments of the called function. Unless
    // the index is out of bounds, which presumably means it's a variadic
    // function.
    if (!UseDABAttr)
      return Index;
    unsigned DABIndices = DABAttr->argIndices_size();
    unsigned NewIndex = Index < DABIndices
                            ? DABAttr->argIndices_begin()[Index]
                            : Index - DABIndices + FD->getNumParams();
    if (NewIndex >= TheCall->getNumArgs())
      return llvm::None;
    return NewIndex;
  };

  // Size taken from an explicit integer argument (the _chk size parameter).
  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(NewIndex);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    llvm::APSInt Integer = Result.Val.getInt();
    Integer.setIsUnsigned(true);
    return Integer;
  };

  // Size derived from the pointed-to object (__builtin_object_size style).
  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
    int BOSType = 0;
    // This check can fail for variadic functions.
    if (Index < FD->getNumParams()) {
      if (const auto *POS =
              FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
        BOSType = POS->getType();
    }

    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  // Size derived from a statically-known string length (for strcpy etc.).
  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Optional<unsigned> IndexOptional = TranslateIndex(Index);
    if (!IndexOptional)
      return llvm::None;
    unsigned NewIndex = IndexOptional.getValue();

    const Expr *ObjArg = TheCall->getArg(NewIndex);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  auto GetFunctionName = [&]() {
    StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
    // Skim off the details of whichever builtin was called to produce a better
    // diagnostic, as it's unlikely that the user wrote the __builtin
    // explicitly.
    if (IsChkVariant) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
      FunctionName = FunctionName.drop_back(std::strlen("_chk"));
    } else if (FunctionName.startswith("__builtin_")) {
      FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
    }
    return FunctionName;
  };

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIscanf:
  case Builtin::BIfscanf:
  case Builtin::BIsscanf: {
    // fscanf/sscanf take a stream/source first; plain scanf does not.
    unsigned FormatIndex = 1;
    unsigned DataIndex = 2;
    if (BuiltinID == Builtin::BIscanf) {
      FormatIndex = 0;
      DataIndex = 1;
    }

    const auto *FormatExpr =
        TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
    if (!Format)
      return;

    if (!Format->isAscii() && !Format->isUTF8())
      return;

    auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
                        unsigned SourceSize) {
      DiagID = diag::warn_fortify_scanf_overflow;
      unsigned Index = ArgIndex + DataIndex;
      StringRef FunctionName = GetFunctionName();
      DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
                          PDiag(DiagID) << FunctionName << (Index + 1)
                                        << DestSize << SourceSize);
    };

    StringRef FormatStrRef = Format->getString();
    auto ShiftedComputeSizeArgument = [&](unsigned Index) {
      return ComputeSizeArgument(Index + DataIndex);
    };
    ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
    const char *FormatBytes = FormatStrRef.data();
    const ConstantArrayType *T =
        Context.getAsConstantArrayType(Format->getType());
    assert(T && "String literal not of constant array type!");
    size_t TypeSize = T->getSize().getZExtValue();

    // In case there's a null byte somewhere.
    size_t StrLen =
        std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));

    analyze_format_string::ParseScanfString(H, FormatBytes,
                                            FormatBytes + StrLen, getLangOpts(),
                                            Context.getTargetInfo());

    // Unlike the other cases, in this one we have already issued the diagnostic
    // here, so no need to continue (because unlike the other cases, here the
    // diagnostic refers to the argument number).
    return;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
880 size_t StrLen = 881 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 882 if (!analyze_format_string::ParsePrintfString( 883 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 884 Context.getTargetInfo(), false)) { 885 DiagID = diag::warn_fortify_source_format_overflow; 886 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 887 .extOrTrunc(SizeTypeWidth); 888 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 889 DestinationSize = ComputeExplicitObjectSizeArgument(2); 890 IsChkVariant = true; 891 } else { 892 DestinationSize = ComputeSizeArgument(0); 893 } 894 break; 895 } 896 } 897 return; 898 } 899 case Builtin::BI__builtin___memcpy_chk: 900 case Builtin::BI__builtin___memmove_chk: 901 case Builtin::BI__builtin___memset_chk: 902 case Builtin::BI__builtin___strlcat_chk: 903 case Builtin::BI__builtin___strlcpy_chk: 904 case Builtin::BI__builtin___strncat_chk: 905 case Builtin::BI__builtin___strncpy_chk: 906 case Builtin::BI__builtin___stpncpy_chk: 907 case Builtin::BI__builtin___memccpy_chk: 908 case Builtin::BI__builtin___mempcpy_chk: { 909 DiagID = diag::warn_builtin_chk_overflow; 910 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 911 DestinationSize = 912 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 913 IsChkVariant = true; 914 break; 915 } 916 917 case Builtin::BI__builtin___snprintf_chk: 918 case Builtin::BI__builtin___vsnprintf_chk: { 919 DiagID = diag::warn_builtin_chk_overflow; 920 SourceSize = ComputeExplicitObjectSizeArgument(1); 921 DestinationSize = ComputeExplicitObjectSizeArgument(3); 922 IsChkVariant = true; 923 break; 924 } 925 926 case Builtin::BIstrncat: 927 case Builtin::BI__builtin_strncat: 928 case Builtin::BIstrncpy: 929 case Builtin::BI__builtin_strncpy: 930 case Builtin::BIstpncpy: 931 case Builtin::BI__builtin_stpncpy: { 932 // Whether these functions overflow depends on the runtime strlen of the 933 // string, not just the buffer size, so emitting the 
"always overflow" 934 // diagnostic isn't quite right. We should still diagnose passing a buffer 935 // size larger than the destination buffer though; this is a runtime abort 936 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 937 DiagID = diag::warn_fortify_source_size_mismatch; 938 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 939 DestinationSize = ComputeSizeArgument(0); 940 break; 941 } 942 943 case Builtin::BImemcpy: 944 case Builtin::BI__builtin_memcpy: 945 case Builtin::BImemmove: 946 case Builtin::BI__builtin_memmove: 947 case Builtin::BImemset: 948 case Builtin::BI__builtin_memset: 949 case Builtin::BImempcpy: 950 case Builtin::BI__builtin_mempcpy: { 951 DiagID = diag::warn_fortify_source_overflow; 952 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 953 DestinationSize = ComputeSizeArgument(0); 954 break; 955 } 956 case Builtin::BIsnprintf: 957 case Builtin::BI__builtin_snprintf: 958 case Builtin::BIvsnprintf: 959 case Builtin::BI__builtin_vsnprintf: { 960 DiagID = diag::warn_fortify_source_size_mismatch; 961 SourceSize = ComputeExplicitObjectSizeArgument(1); 962 DestinationSize = ComputeSizeArgument(0); 963 break; 964 } 965 } 966 967 if (!SourceSize || !DestinationSize || 968 llvm::APSInt::compareValues(SourceSize.getValue(), 969 DestinationSize.getValue()) <= 0) 970 return; 971 972 StringRef FunctionName = GetFunctionName(); 973 974 SmallString<16> DestinationStr; 975 SmallString<16> SourceStr; 976 DestinationSize->toString(DestinationStr, /*Radix=*/10); 977 SourceSize->toString(SourceStr, /*Radix=*/10); 978 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 979 PDiag(DiagID) 980 << FunctionName << DestinationStr << SourceStr); 981 } 982 983 static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 984 Scope::ScopeFlags NeededScopeFlags, 985 unsigned DiagID) { 986 // Scopes aren't available during instantiation. 
// Fortunately, builtin
// functions cannot be template args so they cannot be formed through template
// instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  // Walk outward from the current scope looking for an SEH __except scope.
  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    // The callee of a builtin call is always a DeclRefExpr; use its name in
    // the diagnostic.
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

/// Returns true if \p Arg has Clang block pointer type.
static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
/// Emits a diagnostic for each offending parameter and returns true if any
/// parameter is not a 'local void *'.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

/// Diagnose a subgroup builtin call when neither the cl_khr_subgroups
/// extension nor the __opencl_c_subgroups feature is available; returns true
/// if a diagnostic was emitted.
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // OpenCL device can support extension but not the feature as extension
  // requires subgroup independent forward progress, but subgroup independent
  // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
      !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
                                        S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee()
        << "cl_khr_subgroups or __opencl_c_subgroups";
    return true;
  }
  return false;
}

/// Checks a two-argument subgroup builtin whose first argument must be an
/// ndrange_t and whose second must be a block with only 'local void *'
/// parameters. Returns true on error.
static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  // NOTE(review): the ndrange_t check compares the printed type name rather
  // than the type itself — confirm this is intentional (ndrange_t is a
  // header-defined struct, not a Clang builtin type).
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // Second argument must be a block, and its parameters must all be
  // 'local void *'.
  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  // The single argument must be a block whose parameters are all
  // 'local void *'.
  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

/// Checks that arguments [Start, End] of \p TheCall are (convertible to)
/// size_t; returns true if any argument is not.
static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
1114 static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 1115 Expr *BlockArg, 1116 unsigned NumNonVarArgs) { 1117 const BlockPointerType *BPT = 1118 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1119 unsigned NumBlockParams = 1120 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 1121 unsigned TotalNumArgs = TheCall->getNumArgs(); 1122 1123 // For each argument passed to the block, a corresponding uint needs to 1124 // be passed to describe the size of the local memory. 1125 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 1126 S.Diag(TheCall->getBeginLoc(), 1127 diag::err_opencl_enqueue_kernel_local_size_args); 1128 return true; 1129 } 1130 1131 // Check that the sizes of the local memory are specified by integers. 1132 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 1133 TotalNumArgs - 1); 1134 } 1135 1136 /// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 1137 /// overload formats specified in Table 6.13.17.1. 1138 /// int enqueue_kernel(queue_t queue, 1139 /// kernel_enqueue_flags_t flags, 1140 /// const ndrange_t ndrange, 1141 /// void (^block)(void)) 1142 /// int enqueue_kernel(queue_t queue, 1143 /// kernel_enqueue_flags_t flags, 1144 /// const ndrange_t ndrange, 1145 /// uint num_events_in_wait_list, 1146 /// clk_event_t *event_wait_list, 1147 /// clk_event_t *event_ret, 1148 /// void (^block)(void)) 1149 /// int enqueue_kernel(queue_t queue, 1150 /// kernel_enqueue_flags_t flags, 1151 /// const ndrange_t ndrange, 1152 /// void (^block)(local void*, ...), 1153 /// uint size0, ...) 1154 /// int enqueue_kernel(queue_t queue, 1155 /// kernel_enqueue_flags_t flags, 1156 /// const ndrange_t ndrange, 1157 /// uint num_events_in_wait_list, 1158 /// clk_event_t *event_wait_list, 1159 /// clk_event_t *event_ret, 1160 /// void (^block)(local void*, ...), 1161 /// uint size0, ...) 
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // All four overloads take at least (queue, flags, ndrange, ...).
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  // NOTE(review): compared by printed type name, not by type identity —
  // ndrange_t is a header-defined struct; confirm name-matching is intended.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype: this form takes a
    // parameterless block.
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs: the local-size overload with no events.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Forth argument has to be any integer type (num_events_in_wait_list).
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t
    // (event_wait_list); a null pointer constant is also accepted.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t
    // (event_ret); a null pointer constant is also accepted.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    // Events overload with a block taking 'local void *' parameters: the
    // seven fixed arguments are followed by one size per block parameter.
    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks that the first argument is a pipe and that its access qualifier
/// (read_only/write_only, defaulting to read_only) is compatible with the
/// pipe builtin being called. Returns true on error.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    // Read builtins: no qualifier counts as read_only.
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    // Write builtins: an explicit write_only qualifier is required.
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    // NOTE(review): the isUnsignedIntegerType() test is redundant — unsigned
    // integer types already satisfy isIntegerType().
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  // NOTE(review): the isUnsignedIntegerType() test is redundant — unsigned
  // integer types already satisfy isIntegerType().
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file , we used int
  // as return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  // The argument must be a pointer, and not into the constant address space.
  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // Warn (don't error) when the pointee is not in the generic address space.
  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  // Rewrite the call's result type so it points into the address space named
  // by the builtin, preserving the pointee's other qualifiers.
  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

/// Checks a __builtin_launder call: the argument must be a pointer to a
/// complete object type (not a function or void pointer). Sets the call's
/// type to the (decayed) argument type. Returns the checked call or
/// ExprError().
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  // Select the diagnostic %select index for invalid parameter types:
  // 0 = non-pointer, 1 = function pointer, 2 = void pointer.
  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Convert the argument as if it initialized a parameter of type ParamTy.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current object format type is in the
// list of unsupported types.
static bool CheckBuiltinTargetNotInUnsupported(
    Sema &S, unsigned BuiltinID, CallExpr *TheCall,
    ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
  llvm::Triple::ObjectFormatType CurObjFormat =
      S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
  if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
        << TheCall->getSourceRange();
    return true;
  }
  return false;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                              ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

/// Dispatch a target-specific builtin call to the per-target checker for the
/// current triple's architecture. Returns true if a semantic error was found.
bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.
1675 1676 // If any arguments are required to be ICE's, check and diagnose. 1677 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { 1678 // Skip arguments not required to be ICE's. 1679 if ((ICEArguments & (1 << ArgNo)) == 0) continue; 1680 1681 llvm::APSInt Result; 1682 // If we don't have enough arguments, continue so we can issue better 1683 // diagnostic in checkArgCount(...) 1684 if (ArgNo < TheCall->getNumArgs() && 1685 SemaBuiltinConstantArg(TheCall, ArgNo, Result)) 1686 return true; 1687 ICEArguments &= ~(1 << ArgNo); 1688 } 1689 1690 switch (BuiltinID) { 1691 case Builtin::BI__builtin___CFStringMakeConstantString: 1692 // CFStringMakeConstantString is currently not implemented for GOFF (i.e., 1693 // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported 1694 if (CheckBuiltinTargetNotInUnsupported( 1695 *this, BuiltinID, TheCall, 1696 {llvm::Triple::GOFF, llvm::Triple::XCOFF})) 1697 return ExprError(); 1698 assert(TheCall->getNumArgs() == 1 && 1699 "Wrong # arguments to builtin CFStringMakeConstantString"); 1700 if (CheckObjCString(TheCall->getArg(0))) 1701 return ExprError(); 1702 break; 1703 case Builtin::BI__builtin_ms_va_start: 1704 case Builtin::BI__builtin_stdarg_start: 1705 case Builtin::BI__builtin_va_start: 1706 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1707 return ExprError(); 1708 break; 1709 case Builtin::BI__va_start: { 1710 switch (Context.getTargetInfo().getTriple().getArch()) { 1711 case llvm::Triple::aarch64: 1712 case llvm::Triple::arm: 1713 case llvm::Triple::thumb: 1714 if (SemaBuiltinVAStartARMMicrosoft(TheCall)) 1715 return ExprError(); 1716 break; 1717 default: 1718 if (SemaBuiltinVAStart(BuiltinID, TheCall)) 1719 return ExprError(); 1720 break; 1721 } 1722 break; 1723 } 1724 1725 // The acquire, release, and no fence variants are ARM and AArch64 only. 
1726 case Builtin::BI_interlockedbittestandset_acq: 1727 case Builtin::BI_interlockedbittestandset_rel: 1728 case Builtin::BI_interlockedbittestandset_nf: 1729 case Builtin::BI_interlockedbittestandreset_acq: 1730 case Builtin::BI_interlockedbittestandreset_rel: 1731 case Builtin::BI_interlockedbittestandreset_nf: 1732 if (CheckBuiltinTargetInSupported( 1733 *this, BuiltinID, TheCall, 1734 {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64})) 1735 return ExprError(); 1736 break; 1737 1738 // The 64-bit bittest variants are x64, ARM, and AArch64 only. 1739 case Builtin::BI_bittest64: 1740 case Builtin::BI_bittestandcomplement64: 1741 case Builtin::BI_bittestandreset64: 1742 case Builtin::BI_bittestandset64: 1743 case Builtin::BI_interlockedbittestandreset64: 1744 case Builtin::BI_interlockedbittestandset64: 1745 if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall, 1746 {llvm::Triple::x86_64, llvm::Triple::arm, 1747 llvm::Triple::thumb, 1748 llvm::Triple::aarch64})) 1749 return ExprError(); 1750 break; 1751 1752 case Builtin::BI__builtin_isgreater: 1753 case Builtin::BI__builtin_isgreaterequal: 1754 case Builtin::BI__builtin_isless: 1755 case Builtin::BI__builtin_islessequal: 1756 case Builtin::BI__builtin_islessgreater: 1757 case Builtin::BI__builtin_isunordered: 1758 if (SemaBuiltinUnorderedCompare(TheCall)) 1759 return ExprError(); 1760 break; 1761 case Builtin::BI__builtin_fpclassify: 1762 if (SemaBuiltinFPClassification(TheCall, 6)) 1763 return ExprError(); 1764 break; 1765 case Builtin::BI__builtin_isfinite: 1766 case Builtin::BI__builtin_isinf: 1767 case Builtin::BI__builtin_isinf_sign: 1768 case Builtin::BI__builtin_isnan: 1769 case Builtin::BI__builtin_isnormal: 1770 case Builtin::BI__builtin_signbit: 1771 case Builtin::BI__builtin_signbitf: 1772 case Builtin::BI__builtin_signbitl: 1773 if (SemaBuiltinFPClassification(TheCall, 1)) 1774 return ExprError(); 1775 break; 1776 case Builtin::BI__builtin_shufflevector: 1777 return 
SemaBuiltinShuffleVector(TheCall); 1778 // TheCall will be freed by the smart pointer here, but that's fine, since 1779 // SemaBuiltinShuffleVector guts it, but then doesn't release it. 1780 case Builtin::BI__builtin_prefetch: 1781 if (SemaBuiltinPrefetch(TheCall)) 1782 return ExprError(); 1783 break; 1784 case Builtin::BI__builtin_alloca_with_align: 1785 case Builtin::BI__builtin_alloca_with_align_uninitialized: 1786 if (SemaBuiltinAllocaWithAlign(TheCall)) 1787 return ExprError(); 1788 LLVM_FALLTHROUGH; 1789 case Builtin::BI__builtin_alloca: 1790 case Builtin::BI__builtin_alloca_uninitialized: 1791 Diag(TheCall->getBeginLoc(), diag::warn_alloca) 1792 << TheCall->getDirectCallee(); 1793 break; 1794 case Builtin::BI__arithmetic_fence: 1795 if (SemaBuiltinArithmeticFence(TheCall)) 1796 return ExprError(); 1797 break; 1798 case Builtin::BI__assume: 1799 case Builtin::BI__builtin_assume: 1800 if (SemaBuiltinAssume(TheCall)) 1801 return ExprError(); 1802 break; 1803 case Builtin::BI__builtin_assume_aligned: 1804 if (SemaBuiltinAssumeAligned(TheCall)) 1805 return ExprError(); 1806 break; 1807 case Builtin::BI__builtin_dynamic_object_size: 1808 case Builtin::BI__builtin_object_size: 1809 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3)) 1810 return ExprError(); 1811 break; 1812 case Builtin::BI__builtin_longjmp: 1813 if (SemaBuiltinLongjmp(TheCall)) 1814 return ExprError(); 1815 break; 1816 case Builtin::BI__builtin_setjmp: 1817 if (SemaBuiltinSetjmp(TheCall)) 1818 return ExprError(); 1819 break; 1820 case Builtin::BI__builtin_classify_type: 1821 if (checkArgCount(*this, TheCall, 1)) return true; 1822 TheCall->setType(Context.IntTy); 1823 break; 1824 case Builtin::BI__builtin_complex: 1825 if (SemaBuiltinComplex(TheCall)) 1826 return ExprError(); 1827 break; 1828 case Builtin::BI__builtin_constant_p: { 1829 if (checkArgCount(*this, TheCall, 1)) return true; 1830 ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); 1831 if (Arg.isInvalid()) return 
true; 1832 TheCall->setArg(0, Arg.get()); 1833 TheCall->setType(Context.IntTy); 1834 break; 1835 } 1836 case Builtin::BI__builtin_launder: 1837 return SemaBuiltinLaunder(*this, TheCall); 1838 case Builtin::BI__sync_fetch_and_add: 1839 case Builtin::BI__sync_fetch_and_add_1: 1840 case Builtin::BI__sync_fetch_and_add_2: 1841 case Builtin::BI__sync_fetch_and_add_4: 1842 case Builtin::BI__sync_fetch_and_add_8: 1843 case Builtin::BI__sync_fetch_and_add_16: 1844 case Builtin::BI__sync_fetch_and_sub: 1845 case Builtin::BI__sync_fetch_and_sub_1: 1846 case Builtin::BI__sync_fetch_and_sub_2: 1847 case Builtin::BI__sync_fetch_and_sub_4: 1848 case Builtin::BI__sync_fetch_and_sub_8: 1849 case Builtin::BI__sync_fetch_and_sub_16: 1850 case Builtin::BI__sync_fetch_and_or: 1851 case Builtin::BI__sync_fetch_and_or_1: 1852 case Builtin::BI__sync_fetch_and_or_2: 1853 case Builtin::BI__sync_fetch_and_or_4: 1854 case Builtin::BI__sync_fetch_and_or_8: 1855 case Builtin::BI__sync_fetch_and_or_16: 1856 case Builtin::BI__sync_fetch_and_and: 1857 case Builtin::BI__sync_fetch_and_and_1: 1858 case Builtin::BI__sync_fetch_and_and_2: 1859 case Builtin::BI__sync_fetch_and_and_4: 1860 case Builtin::BI__sync_fetch_and_and_8: 1861 case Builtin::BI__sync_fetch_and_and_16: 1862 case Builtin::BI__sync_fetch_and_xor: 1863 case Builtin::BI__sync_fetch_and_xor_1: 1864 case Builtin::BI__sync_fetch_and_xor_2: 1865 case Builtin::BI__sync_fetch_and_xor_4: 1866 case Builtin::BI__sync_fetch_and_xor_8: 1867 case Builtin::BI__sync_fetch_and_xor_16: 1868 case Builtin::BI__sync_fetch_and_nand: 1869 case Builtin::BI__sync_fetch_and_nand_1: 1870 case Builtin::BI__sync_fetch_and_nand_2: 1871 case Builtin::BI__sync_fetch_and_nand_4: 1872 case Builtin::BI__sync_fetch_and_nand_8: 1873 case Builtin::BI__sync_fetch_and_nand_16: 1874 case Builtin::BI__sync_add_and_fetch: 1875 case Builtin::BI__sync_add_and_fetch_1: 1876 case Builtin::BI__sync_add_and_fetch_2: 1877 case Builtin::BI__sync_add_and_fetch_4: 1878 case 
Builtin::BI__sync_add_and_fetch_8: 1879 case Builtin::BI__sync_add_and_fetch_16: 1880 case Builtin::BI__sync_sub_and_fetch: 1881 case Builtin::BI__sync_sub_and_fetch_1: 1882 case Builtin::BI__sync_sub_and_fetch_2: 1883 case Builtin::BI__sync_sub_and_fetch_4: 1884 case Builtin::BI__sync_sub_and_fetch_8: 1885 case Builtin::BI__sync_sub_and_fetch_16: 1886 case Builtin::BI__sync_and_and_fetch: 1887 case Builtin::BI__sync_and_and_fetch_1: 1888 case Builtin::BI__sync_and_and_fetch_2: 1889 case Builtin::BI__sync_and_and_fetch_4: 1890 case Builtin::BI__sync_and_and_fetch_8: 1891 case Builtin::BI__sync_and_and_fetch_16: 1892 case Builtin::BI__sync_or_and_fetch: 1893 case Builtin::BI__sync_or_and_fetch_1: 1894 case Builtin::BI__sync_or_and_fetch_2: 1895 case Builtin::BI__sync_or_and_fetch_4: 1896 case Builtin::BI__sync_or_and_fetch_8: 1897 case Builtin::BI__sync_or_and_fetch_16: 1898 case Builtin::BI__sync_xor_and_fetch: 1899 case Builtin::BI__sync_xor_and_fetch_1: 1900 case Builtin::BI__sync_xor_and_fetch_2: 1901 case Builtin::BI__sync_xor_and_fetch_4: 1902 case Builtin::BI__sync_xor_and_fetch_8: 1903 case Builtin::BI__sync_xor_and_fetch_16: 1904 case Builtin::BI__sync_nand_and_fetch: 1905 case Builtin::BI__sync_nand_and_fetch_1: 1906 case Builtin::BI__sync_nand_and_fetch_2: 1907 case Builtin::BI__sync_nand_and_fetch_4: 1908 case Builtin::BI__sync_nand_and_fetch_8: 1909 case Builtin::BI__sync_nand_and_fetch_16: 1910 case Builtin::BI__sync_val_compare_and_swap: 1911 case Builtin::BI__sync_val_compare_and_swap_1: 1912 case Builtin::BI__sync_val_compare_and_swap_2: 1913 case Builtin::BI__sync_val_compare_and_swap_4: 1914 case Builtin::BI__sync_val_compare_and_swap_8: 1915 case Builtin::BI__sync_val_compare_and_swap_16: 1916 case Builtin::BI__sync_bool_compare_and_swap: 1917 case Builtin::BI__sync_bool_compare_and_swap_1: 1918 case Builtin::BI__sync_bool_compare_and_swap_2: 1919 case Builtin::BI__sync_bool_compare_and_swap_4: 1920 case Builtin::BI__sync_bool_compare_and_swap_8: 
1921 case Builtin::BI__sync_bool_compare_and_swap_16: 1922 case Builtin::BI__sync_lock_test_and_set: 1923 case Builtin::BI__sync_lock_test_and_set_1: 1924 case Builtin::BI__sync_lock_test_and_set_2: 1925 case Builtin::BI__sync_lock_test_and_set_4: 1926 case Builtin::BI__sync_lock_test_and_set_8: 1927 case Builtin::BI__sync_lock_test_and_set_16: 1928 case Builtin::BI__sync_lock_release: 1929 case Builtin::BI__sync_lock_release_1: 1930 case Builtin::BI__sync_lock_release_2: 1931 case Builtin::BI__sync_lock_release_4: 1932 case Builtin::BI__sync_lock_release_8: 1933 case Builtin::BI__sync_lock_release_16: 1934 case Builtin::BI__sync_swap: 1935 case Builtin::BI__sync_swap_1: 1936 case Builtin::BI__sync_swap_2: 1937 case Builtin::BI__sync_swap_4: 1938 case Builtin::BI__sync_swap_8: 1939 case Builtin::BI__sync_swap_16: 1940 return SemaBuiltinAtomicOverloaded(TheCallResult); 1941 case Builtin::BI__sync_synchronize: 1942 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1943 << TheCall->getCallee()->getSourceRange(); 1944 break; 1945 case Builtin::BI__builtin_nontemporal_load: 1946 case Builtin::BI__builtin_nontemporal_store: 1947 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1948 case Builtin::BI__builtin_memcpy_inline: { 1949 if (checkArgCount(*this, TheCall, 3)) 1950 return ExprError(); 1951 auto ArgArrayConversionFailed = [&](unsigned Arg) { 1952 ExprResult ArgExpr = 1953 DefaultFunctionArrayLvalueConversion(TheCall->getArg(Arg)); 1954 if (ArgExpr.isInvalid()) 1955 return true; 1956 TheCall->setArg(Arg, ArgExpr.get()); 1957 return false; 1958 }; 1959 1960 if (ArgArrayConversionFailed(0) || ArgArrayConversionFailed(1)) 1961 return true; 1962 clang::Expr *SizeOp = TheCall->getArg(2); 1963 // We warn about copying to or from `nullptr` pointers when `size` is 1964 // greater than 0. When `size` is value dependent we cannot evaluate its 1965 // value so we bail out. 
1966 if (SizeOp->isValueDependent()) 1967 break; 1968 if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) { 1969 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1970 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1971 } 1972 break; 1973 } 1974 #define BUILTIN(ID, TYPE, ATTRS) 1975 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1976 case Builtin::BI##ID: \ 1977 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1978 #include "clang/Basic/Builtins.def" 1979 case Builtin::BI__annotation: 1980 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1981 return ExprError(); 1982 break; 1983 case Builtin::BI__builtin_annotation: 1984 if (SemaBuiltinAnnotation(*this, TheCall)) 1985 return ExprError(); 1986 break; 1987 case Builtin::BI__builtin_addressof: 1988 if (SemaBuiltinAddressof(*this, TheCall)) 1989 return ExprError(); 1990 break; 1991 case Builtin::BI__builtin_function_start: 1992 if (SemaBuiltinFunctionStart(*this, TheCall)) 1993 return ExprError(); 1994 break; 1995 case Builtin::BI__builtin_is_aligned: 1996 case Builtin::BI__builtin_align_up: 1997 case Builtin::BI__builtin_align_down: 1998 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1999 return ExprError(); 2000 break; 2001 case Builtin::BI__builtin_add_overflow: 2002 case Builtin::BI__builtin_sub_overflow: 2003 case Builtin::BI__builtin_mul_overflow: 2004 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 2005 return ExprError(); 2006 break; 2007 case Builtin::BI__builtin_operator_new: 2008 case Builtin::BI__builtin_operator_delete: { 2009 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 2010 ExprResult Res = 2011 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 2012 if (Res.isInvalid()) 2013 CorrectDelayedTyposInExpr(TheCallResult.get()); 2014 return Res; 2015 } 2016 case Builtin::BI__builtin_dump_struct: { 2017 // We first want to ensure we are called with 2 arguments 2018 if (checkArgCount(*this, TheCall, 2)) 2019 
return ExprError(); 2020 // Ensure that the first argument is of type 'struct XX *' 2021 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 2022 const QualType PtrArgType = PtrArg->getType(); 2023 if (!PtrArgType->isPointerType() || 2024 !PtrArgType->getPointeeType()->isRecordType()) { 2025 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2026 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 2027 << "structure pointer"; 2028 return ExprError(); 2029 } 2030 2031 // Ensure that the second argument is of type 'FunctionType' 2032 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 2033 const QualType FnPtrArgType = FnPtrArg->getType(); 2034 if (!FnPtrArgType->isPointerType()) { 2035 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2036 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2037 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2038 return ExprError(); 2039 } 2040 2041 const auto *FuncType = 2042 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 2043 2044 if (!FuncType) { 2045 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2046 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 2047 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2048 return ExprError(); 2049 } 2050 2051 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 2052 if (!FT->getNumParams()) { 2053 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 2054 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2055 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2056 return ExprError(); 2057 } 2058 QualType PT = FT->getParamType(0); 2059 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 2060 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 2061 !PT->getPointeeType().isConstQualified()) { 2062 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 
2063 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 2064 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 2065 return ExprError(); 2066 } 2067 } 2068 2069 TheCall->setType(Context.IntTy); 2070 break; 2071 } 2072 case Builtin::BI__builtin_expect_with_probability: { 2073 // We first want to ensure we are called with 3 arguments 2074 if (checkArgCount(*this, TheCall, 3)) 2075 return ExprError(); 2076 // then check probability is constant float in range [0.0, 1.0] 2077 const Expr *ProbArg = TheCall->getArg(2); 2078 SmallVector<PartialDiagnosticAt, 8> Notes; 2079 Expr::EvalResult Eval; 2080 Eval.Diag = &Notes; 2081 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 2082 !Eval.Val.isFloat()) { 2083 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 2084 << ProbArg->getSourceRange(); 2085 for (const PartialDiagnosticAt &PDiag : Notes) 2086 Diag(PDiag.first, PDiag.second); 2087 return ExprError(); 2088 } 2089 llvm::APFloat Probability = Eval.Val.getFloat(); 2090 bool LoseInfo = false; 2091 Probability.convert(llvm::APFloat::IEEEdouble(), 2092 llvm::RoundingMode::Dynamic, &LoseInfo); 2093 if (!(Probability >= llvm::APFloat(0.0) && 2094 Probability <= llvm::APFloat(1.0))) { 2095 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 2096 << ProbArg->getSourceRange(); 2097 return ExprError(); 2098 } 2099 break; 2100 } 2101 case Builtin::BI__builtin_preserve_access_index: 2102 if (SemaBuiltinPreserveAI(*this, TheCall)) 2103 return ExprError(); 2104 break; 2105 case Builtin::BI__builtin_call_with_static_chain: 2106 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 2107 return ExprError(); 2108 break; 2109 case Builtin::BI__exception_code: 2110 case Builtin::BI_exception_code: 2111 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 2112 diag::err_seh___except_block)) 2113 return ExprError(); 2114 break; 2115 case Builtin::BI__exception_info: 2116 case Builtin::BI_exception_info: 2117 if 
(SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 2118 diag::err_seh___except_filter)) 2119 return ExprError(); 2120 break; 2121 case Builtin::BI__GetExceptionInfo: 2122 if (checkArgCount(*this, TheCall, 1)) 2123 return ExprError(); 2124 2125 if (CheckCXXThrowOperand( 2126 TheCall->getBeginLoc(), 2127 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 2128 TheCall)) 2129 return ExprError(); 2130 2131 TheCall->setType(Context.VoidPtrTy); 2132 break; 2133 case Builtin::BIaddressof: 2134 case Builtin::BI__addressof: 2135 case Builtin::BIforward: 2136 case Builtin::BImove: 2137 case Builtin::BImove_if_noexcept: 2138 case Builtin::BIas_const: { 2139 // These are all expected to be of the form 2140 // T &/&&/* f(U &/&&) 2141 // where T and U only differ in qualification. 2142 if (checkArgCount(*this, TheCall, 1)) 2143 return ExprError(); 2144 QualType Param = FDecl->getParamDecl(0)->getType(); 2145 QualType Result = FDecl->getReturnType(); 2146 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof || 2147 BuiltinID == Builtin::BI__addressof; 2148 if (!(Param->isReferenceType() && 2149 (ReturnsPointer ? Result->isPointerType() 2150 : Result->isReferenceType()) && 2151 Context.hasSameUnqualifiedType(Param->getPointeeType(), 2152 Result->getPointeeType()))) { 2153 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported) 2154 << FDecl; 2155 return ExprError(); 2156 } 2157 break; 2158 } 2159 // OpenCL v2.0, s6.13.16 - Pipe functions 2160 case Builtin::BIread_pipe: 2161 case Builtin::BIwrite_pipe: 2162 // Since those two functions are declared with var args, we need a semantic 2163 // check for the argument. 
2164 if (SemaBuiltinRWPipe(*this, TheCall)) 2165 return ExprError(); 2166 break; 2167 case Builtin::BIreserve_read_pipe: 2168 case Builtin::BIreserve_write_pipe: 2169 case Builtin::BIwork_group_reserve_read_pipe: 2170 case Builtin::BIwork_group_reserve_write_pipe: 2171 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 2172 return ExprError(); 2173 break; 2174 case Builtin::BIsub_group_reserve_read_pipe: 2175 case Builtin::BIsub_group_reserve_write_pipe: 2176 if (checkOpenCLSubgroupExt(*this, TheCall) || 2177 SemaBuiltinReserveRWPipe(*this, TheCall)) 2178 return ExprError(); 2179 break; 2180 case Builtin::BIcommit_read_pipe: 2181 case Builtin::BIcommit_write_pipe: 2182 case Builtin::BIwork_group_commit_read_pipe: 2183 case Builtin::BIwork_group_commit_write_pipe: 2184 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 2185 return ExprError(); 2186 break; 2187 case Builtin::BIsub_group_commit_read_pipe: 2188 case Builtin::BIsub_group_commit_write_pipe: 2189 if (checkOpenCLSubgroupExt(*this, TheCall) || 2190 SemaBuiltinCommitRWPipe(*this, TheCall)) 2191 return ExprError(); 2192 break; 2193 case Builtin::BIget_pipe_num_packets: 2194 case Builtin::BIget_pipe_max_packets: 2195 if (SemaBuiltinPipePackets(*this, TheCall)) 2196 return ExprError(); 2197 break; 2198 case Builtin::BIto_global: 2199 case Builtin::BIto_local: 2200 case Builtin::BIto_private: 2201 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 2202 return ExprError(); 2203 break; 2204 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
2205 case Builtin::BIenqueue_kernel: 2206 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 2207 return ExprError(); 2208 break; 2209 case Builtin::BIget_kernel_work_group_size: 2210 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 2211 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 2212 return ExprError(); 2213 break; 2214 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 2215 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 2216 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 2217 return ExprError(); 2218 break; 2219 case Builtin::BI__builtin_os_log_format: 2220 Cleanup.setExprNeedsCleanups(true); 2221 LLVM_FALLTHROUGH; 2222 case Builtin::BI__builtin_os_log_format_buffer_size: 2223 if (SemaBuiltinOSLogFormat(TheCall)) 2224 return ExprError(); 2225 break; 2226 case Builtin::BI__builtin_frame_address: 2227 case Builtin::BI__builtin_return_address: { 2228 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 2229 return ExprError(); 2230 2231 // -Wframe-address warning if non-zero passed to builtin 2232 // return/frame address. 2233 Expr::EvalResult Result; 2234 if (!TheCall->getArg(0)->isValueDependent() && 2235 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 2236 Result.Val.getInt() != 0) 2237 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 2238 << ((BuiltinID == Builtin::BI__builtin_return_address) 2239 ? "__builtin_return_address" 2240 : "__builtin_frame_address") 2241 << TheCall->getSourceRange(); 2242 break; 2243 } 2244 2245 // __builtin_elementwise_abs restricts the element type to signed integers or 2246 // floating point types only. 
2247 case Builtin::BI__builtin_elementwise_abs: { 2248 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2249 return ExprError(); 2250 2251 QualType ArgTy = TheCall->getArg(0)->getType(); 2252 QualType EltTy = ArgTy; 2253 2254 if (auto *VecTy = EltTy->getAs<VectorType>()) 2255 EltTy = VecTy->getElementType(); 2256 if (EltTy->isUnsignedIntegerType()) { 2257 Diag(TheCall->getArg(0)->getBeginLoc(), 2258 diag::err_builtin_invalid_arg_type) 2259 << 1 << /* signed integer or float ty*/ 3 << ArgTy; 2260 return ExprError(); 2261 } 2262 break; 2263 } 2264 2265 // These builtins restrict the element type to floating point 2266 // types only. 2267 case Builtin::BI__builtin_elementwise_ceil: 2268 case Builtin::BI__builtin_elementwise_floor: 2269 case Builtin::BI__builtin_elementwise_roundeven: 2270 case Builtin::BI__builtin_elementwise_trunc: { 2271 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall)) 2272 return ExprError(); 2273 2274 QualType ArgTy = TheCall->getArg(0)->getType(); 2275 QualType EltTy = ArgTy; 2276 2277 if (auto *VecTy = EltTy->getAs<VectorType>()) 2278 EltTy = VecTy->getElementType(); 2279 if (!EltTy->isFloatingType()) { 2280 Diag(TheCall->getArg(0)->getBeginLoc(), 2281 diag::err_builtin_invalid_arg_type) 2282 << 1 << /* float ty*/ 5 << ArgTy; 2283 2284 return ExprError(); 2285 } 2286 break; 2287 } 2288 2289 // These builtins restrict the element type to integer 2290 // types only. 
2291 case Builtin::BI__builtin_elementwise_add_sat: 2292 case Builtin::BI__builtin_elementwise_sub_sat: { 2293 if (SemaBuiltinElementwiseMath(TheCall)) 2294 return ExprError(); 2295 2296 const Expr *Arg = TheCall->getArg(0); 2297 QualType ArgTy = Arg->getType(); 2298 QualType EltTy = ArgTy; 2299 2300 if (auto *VecTy = EltTy->getAs<VectorType>()) 2301 EltTy = VecTy->getElementType(); 2302 2303 if (!EltTy->isIntegerType()) { 2304 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2305 << 1 << /* integer ty */ 6 << ArgTy; 2306 return ExprError(); 2307 } 2308 break; 2309 } 2310 2311 case Builtin::BI__builtin_elementwise_min: 2312 case Builtin::BI__builtin_elementwise_max: 2313 if (SemaBuiltinElementwiseMath(TheCall)) 2314 return ExprError(); 2315 break; 2316 case Builtin::BI__builtin_reduce_max: 2317 case Builtin::BI__builtin_reduce_min: { 2318 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2319 return ExprError(); 2320 2321 const Expr *Arg = TheCall->getArg(0); 2322 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2323 if (!TyA) { 2324 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2325 << 1 << /* vector ty*/ 4 << Arg->getType(); 2326 return ExprError(); 2327 } 2328 2329 TheCall->setType(TyA->getElementType()); 2330 break; 2331 } 2332 2333 // These builtins support vectors of integers only. 2334 // TODO: ADD should support floating-point types. 
2335 case Builtin::BI__builtin_reduce_add: 2336 case Builtin::BI__builtin_reduce_xor: 2337 case Builtin::BI__builtin_reduce_or: 2338 case Builtin::BI__builtin_reduce_and: { 2339 if (PrepareBuiltinReduceMathOneArgCall(TheCall)) 2340 return ExprError(); 2341 2342 const Expr *Arg = TheCall->getArg(0); 2343 const auto *TyA = Arg->getType()->getAs<VectorType>(); 2344 if (!TyA || !TyA->getElementType()->isIntegerType()) { 2345 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) 2346 << 1 << /* vector of integers */ 6 << Arg->getType(); 2347 return ExprError(); 2348 } 2349 TheCall->setType(TyA->getElementType()); 2350 break; 2351 } 2352 2353 case Builtin::BI__builtin_matrix_transpose: 2354 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 2355 2356 case Builtin::BI__builtin_matrix_column_major_load: 2357 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 2358 2359 case Builtin::BI__builtin_matrix_column_major_store: 2360 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 2361 2362 case Builtin::BI__builtin_get_device_side_mangled_name: { 2363 auto Check = [](CallExpr *TheCall) { 2364 if (TheCall->getNumArgs() != 1) 2365 return false; 2366 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 2367 if (!DRE) 2368 return false; 2369 auto *D = DRE->getDecl(); 2370 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 2371 return false; 2372 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 2373 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2374 }; 2375 if (!Check(TheCall)) { 2376 Diag(TheCall->getBeginLoc(), 2377 diag::err_hip_invalid_args_builtin_mangled_name); 2378 return ExprError(); 2379 } 2380 } 2381 } 2382 2383 // Since the target specific builtins for each arch overlap, only check those 2384 // of the arch we are compiling for. 
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
      // The builtin belongs to the aux target (e.g. the host target during
      // offload compilation); check it against the aux target's info.
      assert(Context.getAuxTargetInfo() &&
             "Aux Target Builtin, but not an aux target?");

      if (CheckTSBuiltinFunctionCall(
              *Context.getAuxTargetInfo(),
              Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
        return ExprError();
    } else {
      if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
                                     TheCall))
        return ExprError();
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  // IsQuad is used as a 0/1 shift amount below: quad (128-bit) vectors hold
  // twice as many lanes as 64-bit ones.
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    // Shift immediates are limited to eltbits-1; otherwise the limit is the
    // highest valid lane index (lane count - 1).
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ? 127 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Float16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  case NeonTypeFlags::Float32:
    assert(!shift && "cannot shift float types!");
    return (2 << IsQuad) - 1;
  case NeonTypeFlags::Float64:
    assert(!shift && "cannot shift float types!");
    return (1 << IsQuad) - 1;
  case NeonTypeFlags::BFloat16:
    assert(!shift && "cannot shift float types!");
    return (4 << IsQuad) - 1;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

/// getNeonEltType - Return the QualType corresponding to the elements of
/// the vector type specified by the NeonTypeFlags. This is used to check
/// the pointer arguments for Neon load/store intrinsics.
static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
                               bool IsPolyUnsigned, bool IsInt64Long) {
  switch (Flags.getEltType()) {
  case NeonTypeFlags::Int8:
    return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Int16:
    return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Int32:
    return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
  case NeonTypeFlags::Int64:
    // IsInt64Long selects between LP64-style 'long' and 'long long' so the
    // pointee type matches the target's int64_t.
    if (IsInt64Long)
      return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
    else
      return Flags.isUnsigned() ? Context.UnsignedLongLongTy
                                : Context.LongLongTy;
  case NeonTypeFlags::Poly8:
    // Polynomial types are unsigned on AArch64 targets (IsPolyUnsigned).
    return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
  case NeonTypeFlags::Poly16:
    return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
  case NeonTypeFlags::Poly64:
    if (IsInt64Long)
      return Context.UnsignedLongTy;
    else
      return Context.UnsignedLongLongTy;
  case NeonTypeFlags::Poly128:
    // No corresponding scalar element type; fall through to unreachable.
    break;
  case NeonTypeFlags::Float16:
    return Context.HalfTy;
  case NeonTypeFlags::Float32:
    return Context.FloatTy;
  case NeonTypeFlags::Float64:
    return Context.DoubleTy;
  case NeonTypeFlags::BFloat16:
    return Context.BFloat16Ty;
  }
  llvm_unreachable("Invalid NeonTypeFlag!");
}

bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Range check SVE intrinsics that take immediate values.
  // Each tuple is (argument number, check kind, element size in bits),
  // populated by the TableGen-generated include below.
  SmallVector<std::tuple<int,int,int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  // Perform all the immediate checks for this builtin call.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool(*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    // Dispatch on the kind of immediate check; most are simple inclusive
    // ranges, some are derived from the element size in bits.
    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      // Extract index ranges over a 2048-bit (maximum SVE VL) vector.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane indices are relative to a 128-bit vector segment.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    }
  }

  return HasError;
}

bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  // mask/TV/PtrArgNum/HasConstPtr are filled in per-builtin by the generated
  // overload-check include below.
  uint64_t mask = 0;
  unsigned TV = 0;
  int PtrArgNum = -1;
  bool HasConstPtr = false;
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    // The type code is a bit index into 'mask'; anything outside [0,63] or
    // not set in the mask is an invalid variant for this builtin.
    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    // Polynomial types are unsigned only on AArch64 variants.
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    // Diagnose the pointer argument as if it were being assigned to a
    // pointer of the expected element type.
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here.
2656 unsigned i = 0, l = 0, u = 0; 2657 switch (BuiltinID) { 2658 default: 2659 return false; 2660 #define GET_NEON_IMMEDIATE_CHECK 2661 #include "clang/Basic/arm_neon.inc" 2662 #include "clang/Basic/arm_fp16.inc" 2663 #undef GET_NEON_IMMEDIATE_CHECK 2664 } 2665 2666 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2667 } 2668 2669 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2670 switch (BuiltinID) { 2671 default: 2672 return false; 2673 #include "clang/Basic/arm_mve_builtin_sema.inc" 2674 } 2675 } 2676 2677 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2678 CallExpr *TheCall) { 2679 bool Err = false; 2680 switch (BuiltinID) { 2681 default: 2682 return false; 2683 #include "clang/Basic/arm_cde_builtin_sema.inc" 2684 } 2685 2686 if (Err) 2687 return true; 2688 2689 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2690 } 2691 2692 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2693 const Expr *CoprocArg, bool WantCDE) { 2694 if (isConstantEvaluated()) 2695 return false; 2696 2697 // We can't check the value of a dependent argument. 
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  // Earlier range checking guarantees the argument is a non-negative ICE.
  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  // A coprocessor is a CDE coprocessor iff it is in [0, 7] and its bit is
  // set in the target's CDE coprocessor mask.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}

/// Perform semantic checking of a call to one of the ARM/AArch64 exclusive
/// load/store builtins (ldrex/ldaex/strex/stlex): verify the pointer
/// argument, insert the qualifier-adjusting cast, reject unsupported
/// pointee types and widths (> \p MaxWidth bits), and set the call's result
/// type. Returns true on error.
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here. (For strex/stlex the pointer is the second argument; the
  // first is the value to store.)
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy (i.e. the argument's pointee is
  // more qualified than the builtin's parameter type allows).
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC-managed pointees cannot be accessed through these builtins.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // ldrex/ldaex produce the loaded value.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}

/// Perform target-specific semantic checking of a call to a 32-bit ARM
/// builtin: dispatch to the exclusive-access, special-register, NEON, MVE
/// and CDE checkers, then range-check the builtins with hard-coded
/// immediate operands. Returns true on error.
bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    // 32-bit ARM: exclusive accesses are limited to 64 bits.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  // Coprocessor intrinsics: the coprocessor number (argument 0) must be in
  // [0, 15] and must not name a CDE coprocessor.
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}

/// Perform target-specific semantic checking of a call to an AArch64
/// builtin: dispatch to the exclusive-access, special-register, MTE, NEON
/// and SVE checkers, then range-check the builtins with hard-coded
/// immediate operands. Returns true on error.
bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    // AArch64: exclusive accesses may be up to 128 bits wide.
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. As above, the valid range is [l, l + u].
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}

/// Returns true if \p Arg is an acceptable first argument to
/// __builtin_preserve_field_info (a bit-field, member access, or array
/// subscript).
static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
  if (Arg->getType()->getAsPlaceholderType())
    return false;

  // The first argument needs to be a record field access.
  // If it is an array element access, we delay decision
  // to BPF backend to check whether the access is a
  // field access or not.
  return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
          isa<MemberExpr>(Arg->IgnoreParens()) ||
          isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}

/// Returns true if \p EltTy matches the element type of the vector type
/// \p VectorTy; otherwise emits err_typecheck_call_different_arg_types on
/// \p Call and returns false.
static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
                            QualType VectorTy, QualType EltTy) {
  QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
  if (!Context.hasSameType(VectorEltTy, EltTy)) {
    S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
        << Call->getSourceRange() << VectorEltTy << EltTy;
    return false;
  }
  return true;
}

/// Returns true if \p Arg is an acceptable first argument to
/// __builtin_preserve_type_info: a DeclRef or dereference whose type is a
/// typedef, a named record, or a named enum.
static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
  // format:
  //   1. __builtin_preserve_type_info(*(<type> *)0, flag);
  //   2. <type> var;
  //      __builtin_preserve_type_info(var, flag);
  if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
      !isa<UnaryOperator>(Arg->IgnoreParens()))
    return false;

  // Typedef type.
  if (ArgType->getAs<TypedefType>())
    return true;

  // Record type or Enum type: must be named (anonymous types carry no
  // name to relocate against).
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  if (const auto *RT = Ty->getAs<RecordType>()) {
    if (!RT->getDecl()->getDeclName().isEmpty())
      return true;
  } else if (const auto *ET = Ty->getAs<EnumType>()) {
    if (!ET->getDecl()->getDeclName().isEmpty())
      return true;
  }

  return false;
}

/// Returns true if \p Arg is an acceptable first argument to
/// __builtin_preserve_enum_value: a cast of an enumerator constant to a
/// pointer to its own enum type, then dereferenced.
static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
  QualType ArgType = Arg->getType();
  if (ArgType->getAsPlaceholderType())
    return false;

  // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
  // format:
  //   __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
  //                                 flag);
  const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
  if (!UO)
    return false;

  const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
  if (!CE)
    return false;
  if (CE->getCastKind() != CK_IntegralToPointer &&
      CE->getCastKind() != CK_NullToPointer)
    return false;

  // The integer must be from an EnumConstantDecl.
  const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
  if (!DR)
    return false;

  const EnumConstantDecl *Enumerator =
      dyn_cast<EnumConstantDecl>(DR->getDecl());
  if (!Enumerator)
    return false;

  // The type must be EnumType.
  const Type *Ty = ArgType->getUnqualifiedDesugaredType();
  const auto *ET = Ty->getAs<EnumType>();
  if (!ET)
    return false;

  // The enum value must be supported, i.e. the enumerator must actually
  // belong to the enum named by the cast's target type.
             BPF::BI__builtin_btf_type_id) {
    ReturnUnsignedInt = false;
  }

  if (InvalidArg) {
    Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
    return true;
  }

  // btf_type_id and preserve_enum_value return 'unsigned long'; the other
  // BPF builtins return 'unsigned int'.
  if (ReturnUnsignedInt)
    TheCall->setType(Context.UnsignedIntTy);
  else
    TheCall->setType(Context.UnsignedLongTy);
  return false;
}

/// Range-check the constant immediate operand(s) of a Hexagon builtin
/// against a table of (operand index, signedness, bit width, alignment)
/// descriptors. Returns true on error; builtins not in the table pass.
bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // Describes one immediate operand: which argument it is, whether the
  // encoded field is signed, its width in bits, and a power-of-two scale.
  struct ArgInfo {
    uint8_t OpNum;
    bool IsSigned;
    uint8_t BitWidth;
    uint8_t Align;
  };
  struct BuiltinInfo {
    unsigned BuiltinID;
    ArgInfo Infos[2];
  };

  static BuiltinInfo Infos[] = {
    { Hexagon::BI__builtin_circ_ldd,       {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_ldw,       {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_ldh,       {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_lduh,      {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_ldb,       {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_ldub,      {{ 3, true, 4, 0 }} },
    { Hexagon::BI__builtin_circ_std,       {{ 3, true, 4, 3 }} },
    { Hexagon::BI__builtin_circ_stw,       {{ 3, true, 4, 2 }} },
    { Hexagon::BI__builtin_circ_sth,       {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_sthhi,     {{ 3, true, 4, 1 }} },
    { Hexagon::BI__builtin_circ_stb,       {{ 3, true, 4, 0 }} },

    { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci,  {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci,   {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci,  {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci,   {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci,   {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci,   {{ 1, true, 4, 3 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci,  {{ 1, true, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci,  {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci,  {{ 1, true, 4, 1 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci,  {{ 1, true, 4, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci,  {{ 1, true, 4, 3 }} },

    { Hexagon::BI__builtin_HEXAGON_A2_combineii,    {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrih,        {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfril,        {{ 1, false, 16, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A2_tfrpi,        {{ 0, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_bitspliti,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi,      {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti,      {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_cround_ri,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi,     {{ 1, false, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti,     {{ 1, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui,    {{ 1, false, 7, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_bitsclri,     {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C2_muxii,        {{ 2, true, 8, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri,    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfclass,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfclass,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p,      {{ 0, false, 10, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri,  {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
                                                    {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd,  {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
                                                    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractu,     {{ 1, false, 5, 0 },
                                                     { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_extractup,    {{ 1, false, 6, 0 },
                                                     { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insert,       {{ 2, false, 5, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_insertp,      {{ 2, false, 6, 0 },
                                                     { 3, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh,     {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_setbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
                                                    {{ 2, false, 4, 0 },
                                                     { 3, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i,  {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i,     {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_valignib,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S2_vspliceib,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbaddi,      {{ 1, true , 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi,     {{ 1, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extract,      {{ 1, false, 5, 0 },
                                                     { 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_extractp,     {{ 1, false, 6, 0 },
                                                     { 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_lsli,         {{ 0, true, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i,    {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate,    {{ 2, false, 2, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
                                                    {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat,   {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
                                                    {{ 1, false, 4, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p,      {{ 1, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac,  {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or,   {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r,      {{ 1, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac,  {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or,   {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi,     {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi,    {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi,    {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi,     {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi,     {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
    { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
                                                    {{ 3, false, 1, 0 }} },
  };

  // Use a dynamically initialized static to sort the table exactly once on
  // first run. (partition_point below requires the table to be sorted by
  // BuiltinID.)
  static const bool SortOnce =
      (llvm::sort(Infos,
                  [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
                    return LHS.BuiltinID < RHS.BuiltinID;
                  }),
       true);
  (void)SortOnce;

  // Binary-search the sorted table for this builtin.
  const BuiltinInfo *F = llvm::partition_point(
      Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return false;

  bool Error = false;

  for (const ArgInfo &A : F->Infos) {
    // Ignore empty ArgInfo elements.
    if (A.BitWidth == 0)
      continue;

    // Derive the encodable range from the field width/signedness, then
    // scale by 2^Align and additionally require the value be a multiple
    // of that scale.
    int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
    int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
      Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

/// Perform semantic checking of a call to a Hexagon builtin; currently all
/// checks are immediate-operand range checks. Returns true on error.
bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

/// Perform semantic checking of a call to a MIPS builtin: verify the
/// required CPU feature is enabled, then range-check any constant
/// arguments. Returns true on error.
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

/// Check that the target feature required by a MIPS builtin (dsp, dspr2 or
/// msa, determined by the builtin-ID range) is enabled. Returns true (and
/// diagnoses) if the feature is missing.
bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i: index of the constant argument; [l, u]: its allowed value range;
  // m: if nonzero, the argument must additionally be a multiple of m.
  // Note the negative lower bounds below are stored through an unsigned and
  // converted back to int by SemaBuiltinConstantArgRange.
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // Scaled memory offsets: the range is widened by the element size and the
  // offset must be a multiple of it (m).
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
3598 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3599 unsigned &Mask) { 3600 bool RequireICE = false; 3601 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3602 switch (*Str++) { 3603 case 'V': 3604 return Context.getVectorType(Context.UnsignedCharTy, 16, 3605 VectorType::VectorKind::AltiVecVector); 3606 case 'i': { 3607 char *End; 3608 unsigned size = strtoul(Str, &End, 10); 3609 assert(End != Str && "Missing constant parameter constraint"); 3610 Str = End; 3611 Mask = size; 3612 return Context.IntTy; 3613 } 3614 case 'W': { 3615 char *End; 3616 unsigned size = strtoul(Str, &End, 10); 3617 assert(End != Str && "Missing PowerPC MMA type size"); 3618 Str = End; 3619 QualType Type; 3620 switch (size) { 3621 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3622 case size: Type = Context.Id##Ty; break; 3623 #include "clang/Basic/PPCTypes.def" 3624 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3625 } 3626 bool CheckVectorArgs = false; 3627 while (!CheckVectorArgs) { 3628 switch (*Str++) { 3629 case '*': 3630 Type = Context.getPointerType(Type); 3631 break; 3632 case 'C': 3633 Type = Type.withConst(); 3634 break; 3635 default: 3636 CheckVectorArgs = true; 3637 --Str; 3638 break; 3639 } 3640 } 3641 return Type; 3642 } 3643 default: 3644 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3645 } 3646 } 3647 3648 static bool isPPC_64Builtin(unsigned BuiltinID) { 3649 // These builtins only work on PPC 64bit targets. 
3650 switch (BuiltinID) { 3651 case PPC::BI__builtin_divde: 3652 case PPC::BI__builtin_divdeu: 3653 case PPC::BI__builtin_bpermd: 3654 case PPC::BI__builtin_pdepd: 3655 case PPC::BI__builtin_pextd: 3656 case PPC::BI__builtin_ppc_ldarx: 3657 case PPC::BI__builtin_ppc_stdcx: 3658 case PPC::BI__builtin_ppc_tdw: 3659 case PPC::BI__builtin_ppc_trapd: 3660 case PPC::BI__builtin_ppc_cmpeqb: 3661 case PPC::BI__builtin_ppc_setb: 3662 case PPC::BI__builtin_ppc_mulhd: 3663 case PPC::BI__builtin_ppc_mulhdu: 3664 case PPC::BI__builtin_ppc_maddhd: 3665 case PPC::BI__builtin_ppc_maddhdu: 3666 case PPC::BI__builtin_ppc_maddld: 3667 case PPC::BI__builtin_ppc_load8r: 3668 case PPC::BI__builtin_ppc_store8r: 3669 case PPC::BI__builtin_ppc_insert_exp: 3670 case PPC::BI__builtin_ppc_extract_sig: 3671 case PPC::BI__builtin_ppc_addex: 3672 case PPC::BI__builtin_darn: 3673 case PPC::BI__builtin_darn_raw: 3674 case PPC::BI__builtin_ppc_compare_and_swaplp: 3675 case PPC::BI__builtin_ppc_fetch_and_addlp: 3676 case PPC::BI__builtin_ppc_fetch_and_andlp: 3677 case PPC::BI__builtin_ppc_fetch_and_orlp: 3678 case PPC::BI__builtin_ppc_fetch_and_swaplp: 3679 return true; 3680 } 3681 return false; 3682 } 3683 3684 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3685 StringRef FeatureToCheck, unsigned DiagID, 3686 StringRef DiagArg = "") { 3687 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3688 return false; 3689 3690 if (DiagArg.empty()) 3691 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3692 else 3693 S.Diag(TheCall->getBeginLoc(), DiagID) 3694 << DiagArg << TheCall->getSourceRange(); 3695 3696 return true; 3697 } 3698 3699 /// Returns true if the argument consists of one contiguous run of 1s with any 3700 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3701 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3702 /// since all 1s are not contiguous. 
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  // The wrapped case (1s at both ends) is caught by testing the complement:
  // if ~Result is a shifted mask, Result is a wrapped run of 1s.
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

/// Perform target-specific semantic checks for a PowerPC builtin call:
/// rejects 64-bit-only builtins on 32-bit targets, verifies required target
/// features, and validates immediate-operand constraints.
/// Returns true if a diagnostic was emitted.
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0;
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  // Hardware Transactional Memory builtins require the "htm" feature.
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwci:
  case PPC::BI__builtin_tabortdci:
    return SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm) ||
           (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
            SemaBuiltinConstantArgRange(TheCall, 2, 0, 31));
  case PPC::BI__builtin_tabort:
  case PPC::BI__builtin_tcheck:
  case PPC::BI__builtin_treclaim:
  case PPC::BI__builtin_trechkpt:
  case PPC::BI__builtin_tendall:
  case PPC::BI__builtin_tresume:
  case PPC::BI__builtin_tsuspend:
  case PPC::BI__builtin_get_texasr:
  case PPC::BI__builtin_get_texasru:
  case PPC::BI__builtin_get_tfhar:
  case PPC::BI__builtin_get_tfiar:
  case PPC::BI__builtin_set_texasr:
  case PPC::BI__builtin_set_texasru:
  case PPC::BI__builtin_set_tfhar:
  case PPC::BI__builtin_set_tfiar:
  case PPC::BI__builtin_ttest:
    return SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
  // __builtin_(un)pack_longdouble are available only if long double uses IBM
  // extended double representation.
  case PPC::BI__builtin_unpack_longdouble:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
      return true;
    LLVM_FALLTHROUGH;
  case PPC::BI__builtin_pack_longdouble:
    if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
      return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
             << "ibmlongdouble";
    return false;
  case PPC::BI__builtin_altivec_dst:
  case PPC::BI__builtin_altivec_dstt:
  case PPC::BI__builtin_altivec_dstst:
  case PPC::BI__builtin_altivec_dststt:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  case PPC::BI__builtin_vsx_xxpermdi:
  case PPC::BI__builtin_vsx_xxsldwi:
    return SemaBuiltinVSX(TheCall);
  case PPC::BI__builtin_divwe:
  case PPC::BI__builtin_divweu:
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
    return SemaFeatureCheck(*this, TheCall, "extdiv",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_bpermd:
    return SemaFeatureCheck(*this, TheCall, "bpermd",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_unpack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_pack_vector_int128:
    return SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_only_on_arch, "7");
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
    return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions",
                            diag::err_ppc_builtin_only_on_arch, "10");
  case PPC::BI__builtin_altivec_vgnb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
  case PPC::BI__builtin_altivec_vec_replace_elt:
  case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    QualType VecTy = TheCall->getArg(0)->getType();
    QualType EltTy = TheCall->getArg(1)->getType();
    // The valid byte-index range depends on the element width: 0-12 for
    // 32-bit elements, 0-8 otherwise.
    unsigned Width = Context.getIntWidth(EltTy);
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
           !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
  }
  case PPC::BI__builtin_vsx_xxeval:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
  case PPC::BI__builtin_altivec_vsldbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_altivec_vsrdbi:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
  case PPC::BI__builtin_vsx_xxpermx:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
  case PPC::BI__builtin_ppc_tw:
  case PPC::BI__builtin_ppc_tdw:
    return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_cmprb:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
  // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
  // be a constant that represents a contiguous bit field.
  case PPC::BI__builtin_ppc_rlwnm:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_rlwimi:
  case PPC::BI__builtin_ppc_rldimi:
    return SemaBuiltinConstantArg(TheCall, 2, Result) ||
           SemaValueIsRunOfOnes(TheCall, 3);
  case PPC::BI__builtin_ppc_extract_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_insert_exp:
    return SemaFeatureCheck(*this, TheCall, "power9-vector",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_ppc_addex: {
    if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                         diag::err_ppc_builtin_only_on_arch, "9") ||
        SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
      return true;
    // Output warning for reserved values 1 to 3.
    // The Optional deref is safe: the range check above already proved the
    // argument is an integer constant expression.
    int ArgValue =
        TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
    if (ArgValue != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
          << ArgValue;
    return false;
  }
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_icbt:
  case PPC::BI__builtin_ppc_sthcx:
  case PPC::BI__builtin_ppc_stbcx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8");
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case PPC::BI__builtin_altivec_vcntmbb:
  case PPC::BI__builtin_altivec_vcntmbh:
  case PPC::BI__builtin_altivec_vcntmbw:
  case PPC::BI__builtin_altivec_vcntmbd:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_darn_32:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9");
  case PPC::BI__builtin_vsx_xxgenpcvbm:
  case PPC::BI__builtin_vsx_xxgenpcvhm:
  case PPC::BI__builtin_vsx_xxgenpcvwm:
  case PPC::BI__builtin_vsx_xxgenpcvdm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
  case PPC::BI__builtin_ppc_compare_exp_uo:
  case PPC::BI__builtin_ppc_compare_exp_lt:
  case PPC::BI__builtin_ppc_compare_exp_gt:
  case PPC::BI__builtin_ppc_compare_exp_eq:
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx);
  case PPC::BI__builtin_ppc_test_data_class: {
    // Check if the first argument of the __builtin_ppc_test_data_class call is
    // valid. The argument must be either a 'float' or a 'double'.
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_maxfe:
  case PPC::BI__builtin_ppc_minfe:
  case PPC::BI__builtin_ppc_maxfl:
  case PPC::BI__builtin_ppc_minfl:
  case PPC::BI__builtin_ppc_maxfs:
  case PPC::BI__builtin_ppc_minfs: {
    // maxfe/minfe operate on 128-bit long double, which AIX does not support.
    if (Context.getTargetInfo().getTriple().isOSAIX() &&
        (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
         BuiltinID == PPC::BI__builtin_ppc_minfe))
      return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
             << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
             << false << Context.getTargetInfo().getTriple().str();
    // Argument type should be exact.
    QualType ArgType = QualType(Context.LongDoubleTy);
    if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
        BuiltinID == PPC::BI__builtin_ppc_minfl)
      ArgType = QualType(Context.DoubleTy);
    else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
             BuiltinID == PPC::BI__builtin_ppc_minfs)
      ArgType = QualType(Context.FloatTy);
    for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
      if (TheCall->getArg(I)->getType() != ArgType)
        return Diag(TheCall->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
    return false;
  }
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
    return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
                            diag::err_ppc_builtin_only_on_arch, "7");
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  // Currently every case above returns directly, so this fallback range check
  // is only reached if a future case breaks out of the switch instead.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  // Pointers to / arrays of MMA types are fine; only direct uses of the MMA
  // value types are restricted.
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
// Expands to "|| CoreType == Context.TyA || CoreType == Context.TyB ..."
// for every PPC MMA type, completing the condition started by 'false'.
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

/// Validate the memory-order and sync-scope arguments of AMDGCN atomic
/// builtins. Returns true if a diagnostic was emitted.
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // relaxed/consume are meaningless for a fence; reject them there only.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

/// Check that the LMUL argument of an RVV builtin is one of the encodings
/// valid for vsetvli (0-3 and 5-7; 4 is reserved).
/// Returns true if a diagnostic was emitted.
bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  int64_t Val = Result.getSExtValue();
  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
         << Arg->getSourceRange();
}

/// Returns true for builtins that are only available on riscv32 targets.
static bool isRISCV32Builtin(unsigned BuiltinID) {
  // These builtins only work on riscv32 targets.
  switch (BuiltinID) {
  case RISCV::BI__builtin_riscv_zip_32:
  case RISCV::BI__builtin_riscv_unzip_32:
  case RISCV::BI__builtin_riscv_aes32dsi_32:
  case RISCV::BI__builtin_riscv_aes32dsmi_32:
  case RISCV::BI__builtin_riscv_aes32esi_32:
  case RISCV::BI__builtin_riscv_aes32esmi_32:
  case RISCV::BI__builtin_riscv_sha512sig0h_32:
  case RISCV::BI__builtin_riscv_sha512sig0l_32:
  case RISCV::BI__builtin_riscv_sha512sig1h_32:
  case RISCV::BI__builtin_riscv_sha512sig1l_32:
  case RISCV::BI__builtin_riscv_sha512sum0r_32:
  case RISCV::BI__builtin_riscv_sha512sum1r_32:
    return true;
  }

  return false;
}

/// Perform target-specific semantic checks for a RISC-V builtin call:
/// verifies required extensions, rejects riscv32-only builtins on other
/// targets, and validates immediate-operand constraints.
/// Returns true if a diagnostic was emitted.
bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  // CodeGenFunction can also detect this, but this gives a better error
  // message.
  bool FeatureMissing = false;
  SmallVector<StringRef> ReqFeatures;
  StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
  Features.split(ReqFeatures, ',');

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // Check if each required feature is included. The required-features string
  // is a comma-separated conjunction of '|'-separated alternatives.
  for (StringRef F : ReqFeatures) {
    SmallVector<StringRef> ReqOpFeatures;
    F.split(ReqOpFeatures, '|');
    bool HasFeature = false;
    for (StringRef OF : ReqOpFeatures) {
      if (TI.hasFeature(OF)) {
        // Any one alternative in the '|' group satisfies this conjunct.
        HasFeature = true;
        continue;
      }
    }

    if (!HasFeature) {
      // Build a human-readable, comma-separated list of the missing
      // alternatives for the diagnostic.
      std::string FeatureStrs;
      for (StringRef OF : ReqOpFeatures) {
        // If the feature is 64bit, alter the string so it will print better in
        // the diagnostic.
        if (OF == "64bit")
          OF = "RV64";

        // Convert features like "zbr" and "experimental-zbr" to "Zbr".
        OF.consume_front("experimental-");
        std::string FeatureStr = OF.str();
        FeatureStr[0] = std::toupper(FeatureStr[0]);
        // Combine strings.
        FeatureStrs += FeatureStrs == "" ? "" : ", ";
        FeatureStrs += "'";
        FeatureStrs += FeatureStr;
        FeatureStrs += "'";
      }
      // Error message
      FeatureMissing = true;
      Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
          << TheCall->getSourceRange() << StringRef(FeatureStrs);
    }
  }

  if (FeatureMissing)
    return true;

  switch (BuiltinID) {
  case RISCVVector::BI__builtin_rvv_vsetvli:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
           CheckRISCVLMUL(TheCall, 2);
  case RISCVVector::BI__builtin_rvv_vsetvlimax:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           CheckRISCVLMUL(TheCall, 1);
  case RISCVVector::BI__builtin_rvv_vget_v: {
    // The extract index must address a whole result-sized piece of the
    // source: 0 .. (source size / result size) - 1.
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex =
        (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
        (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
  }
  case RISCVVector::BI__builtin_rvv_vset_v: {
    // The insert index must address a whole insert-value-sized piece of the
    // destination: 0 .. (destination size / inserted size) - 1.
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex =
        (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
        (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
  }
  // Check if byteselect is in [0, 3]
  case RISCV::BI__builtin_riscv_aes32dsi_32:
  case RISCV::BI__builtin_riscv_aes32dsmi_32:
  case RISCV::BI__builtin_riscv_aes32esi_32:
  case RISCV::BI__builtin_riscv_aes32esmi_32:
  case RISCV::BI__builtin_riscv_sm4ks:
  case RISCV::BI__builtin_riscv_sm4ed:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  // Check if rnum is in [0, 10]
  case RISCV::BI__builtin_riscv_aes64ks1i_64:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
  }

  return false;
}

bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    // Abort codes 0-255 are reserved by the hardware; diagnose them.
    Expr *Arg = TheCall->getArg(0);
    if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
// i = index of the immediate operand; [l, u] = its allowed inclusive range.
unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  // These take two independent immediates, so they bypass the shared
  // single-immediate check below.
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs:
    i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  // Shared exit: verify the single immediate selected above.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
/// Returns true (and emits a diagnostic) on error, false on success.
static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
                                   CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string: the target decides which feature
  // names are recognized.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuSupports(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
           << Arg->getSourceRange();
  return false;
}

/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
4292 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4293 Expr *Arg = TheCall->getArg(0); 4294 4295 // Check if the argument is a string literal. 4296 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4297 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4298 << Arg->getSourceRange(); 4299 4300 // Check the contents of the string. 4301 StringRef Feature = 4302 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4303 if (!TI.validateCpuIs(Feature)) 4304 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4305 << Arg->getSourceRange(); 4306 return false; 4307 } 4308 4309 // Check if the rounding mode is legal. 4310 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4311 // Indicates if this instruction has rounding control or just SAE. 4312 bool HasRC = false; 4313 4314 unsigned ArgNum = 0; 4315 switch (BuiltinID) { 4316 default: 4317 return false; 4318 case X86::BI__builtin_ia32_vcvttsd2si32: 4319 case X86::BI__builtin_ia32_vcvttsd2si64: 4320 case X86::BI__builtin_ia32_vcvttsd2usi32: 4321 case X86::BI__builtin_ia32_vcvttsd2usi64: 4322 case X86::BI__builtin_ia32_vcvttss2si32: 4323 case X86::BI__builtin_ia32_vcvttss2si64: 4324 case X86::BI__builtin_ia32_vcvttss2usi32: 4325 case X86::BI__builtin_ia32_vcvttss2usi64: 4326 case X86::BI__builtin_ia32_vcvttsh2si32: 4327 case X86::BI__builtin_ia32_vcvttsh2si64: 4328 case X86::BI__builtin_ia32_vcvttsh2usi32: 4329 case X86::BI__builtin_ia32_vcvttsh2usi64: 4330 ArgNum = 1; 4331 break; 4332 case X86::BI__builtin_ia32_maxpd512: 4333 case X86::BI__builtin_ia32_maxps512: 4334 case X86::BI__builtin_ia32_minpd512: 4335 case X86::BI__builtin_ia32_minps512: 4336 case X86::BI__builtin_ia32_maxph512: 4337 case X86::BI__builtin_ia32_minph512: 4338 ArgNum = 2; 4339 break; 4340 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4341 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4342 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4343 
case X86::BI__builtin_ia32_cvttpd2dq512_mask:
  case X86::BI__builtin_ia32_cvttpd2qq512_mask:
  case X86::BI__builtin_ia32_cvttpd2udq512_mask:
  case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvttps2dq512_mask:
  case X86::BI__builtin_ia32_cvttps2qq512_mask:
  case X86::BI__builtin_ia32_cvttps2udq512_mask:
  case X86::BI__builtin_ia32_cvttps2uqq512_mask:
  case X86::BI__builtin_ia32_vcvttph2w512_mask:
  case X86::BI__builtin_ia32_vcvttph2uw512_mask:
  case X86::BI__builtin_ia32_vcvttph2dq512_mask:
  case X86::BI__builtin_ia32_vcvttph2udq512_mask:
  case X86::BI__builtin_ia32_vcvttph2qq512_mask:
  case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
  case X86::BI__builtin_ia32_exp2pd_mask:
  case X86::BI__builtin_ia32_exp2ps_mask:
  case X86::BI__builtin_ia32_getexppd512_mask:
  case X86::BI__builtin_ia32_getexpps512_mask:
  case X86::BI__builtin_ia32_getexpph512_mask:
  case X86::BI__builtin_ia32_rcp28pd_mask:
  case X86::BI__builtin_ia32_rcp28ps_mask:
  case X86::BI__builtin_ia32_rsqrt28pd_mask:
  case X86::BI__builtin_ia32_rsqrt28ps_mask:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_vcomish:
  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_cmpsh_mask:
  case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
  case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
  case X86::BI__builtin_ia32_cvtss2sd_round_mask:
  case X86::BI__builtin_ia32_getexpsd128_round_mask:
  case X86::BI__builtin_ia32_getexpss128_round_mask:
  case X86::BI__builtin_ia32_getexpsh128_round_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_maxsd_round_mask:
  case X86::BI__builtin_ia32_maxss_round_mask:
  case X86::BI__builtin_ia32_maxsh_round_mask:
  case X86::BI__builtin_ia32_minsd_round_mask:
  case X86::BI__builtin_ia32_minss_round_mask:
  case X86::BI__builtin_ia32_minsh_round_mask:
  case X86::BI__builtin_ia32_rcp28sd_round_mask:
  case X86::BI__builtin_ia32_rcp28ss_round_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
  case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
    ArgNum = 4;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_rangesd128_round_mask:
  case X86::BI__builtin_ia32_rangess128_round_mask:
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
    ArgNum = 5;
    break;
  // Cases below here additionally accept full rounding control (HasRC),
  // not just suppress-all-exceptions.
  case X86::BI__builtin_ia32_vcvtsd2si64:
  case X86::BI__builtin_ia32_vcvtsd2si32:
  case X86::BI__builtin_ia32_vcvtsd2usi32:
  case X86::BI__builtin_ia32_vcvtsd2usi64:
  case X86::BI__builtin_ia32_vcvtss2si32:
  case X86::BI__builtin_ia32_vcvtss2si64:
  case X86::BI__builtin_ia32_vcvtss2usi32:
  case X86::BI__builtin_ia32_vcvtss2usi64:
  case X86::BI__builtin_ia32_vcvtsh2si32:
  case X86::BI__builtin_ia32_vcvtsh2si64:
  case X86::BI__builtin_ia32_vcvtsh2usi32:
  case X86::BI__builtin_ia32_vcvtsh2usi64:
  case X86::BI__builtin_ia32_sqrtpd512:
  case X86::BI__builtin_ia32_sqrtps512:
  case X86::BI__builtin_ia32_sqrtph512:
    ArgNum = 1;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addph512:
  case X86::BI__builtin_ia32_divph512:
  case X86::BI__builtin_ia32_mulph512:
  case X86::BI__builtin_ia32_subph512:
  case X86::BI__builtin_ia32_addpd512:
  case X86::BI__builtin_ia32_addps512:
  case X86::BI__builtin_ia32_divpd512:
  case X86::BI__builtin_ia32_divps512:
  case X86::BI__builtin_ia32_mulpd512:
  case X86::BI__builtin_ia32_mulps512:
  case X86::BI__builtin_ia32_subpd512:
  case X86::BI__builtin_ia32_subps512:
  case X86::BI__builtin_ia32_cvtsi2sd64:
  case X86::BI__builtin_ia32_cvtsi2ss32:
  case X86::BI__builtin_ia32_cvtsi2ss64:
  case X86::BI__builtin_ia32_cvtusi2sd64:
  case X86::BI__builtin_ia32_cvtusi2ss32:
  case X86::BI__builtin_ia32_cvtusi2ss64:
  case X86::BI__builtin_ia32_vcvtusi2sh:
  case X86::BI__builtin_ia32_vcvtusi642sh:
  case X86::BI__builtin_ia32_vcvtsi2sh:
  case X86::BI__builtin_ia32_vcvtsi642sh:
    ArgNum = 2;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_cvtdq2ps512_mask:
  case X86::BI__builtin_ia32_cvtudq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
  case X86::BI__builtin_ia32_vcvtps2phx512_mask:
  case X86::BI__builtin_ia32_cvtpd2ps512_mask:
  case X86::BI__builtin_ia32_cvtpd2dq512_mask:
  case X86::BI__builtin_ia32_cvtpd2qq512_mask:
  case X86::BI__builtin_ia32_cvtpd2udq512_mask:
  case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
  case X86::BI__builtin_ia32_cvtps2dq512_mask:
  case X86::BI__builtin_ia32_cvtps2qq512_mask:
  case X86::BI__builtin_ia32_cvtps2udq512_mask:
  case X86::BI__builtin_ia32_cvtps2uqq512_mask:
  case X86::BI__builtin_ia32_cvtqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtqq2ps512_mask:
  case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
  case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
  case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
  case X86::BI__builtin_ia32_vcvtph2w512_mask:
  case X86::BI__builtin_ia32_vcvtph2uw512_mask:
  case X86::BI__builtin_ia32_vcvtph2dq512_mask:
  case X86::BI__builtin_ia32_vcvtph2udq512_mask:
  case X86::BI__builtin_ia32_vcvtph2qq512_mask:
  case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
  case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
  case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
    ArgNum = 3;
    HasRC = true;
    break;
  case X86::BI__builtin_ia32_addsh_round_mask:
  case X86::BI__builtin_ia32_addss_round_mask:
  case X86::BI__builtin_ia32_addsd_round_mask:
  case X86::BI__builtin_ia32_divsh_round_mask:
  case X86::BI__builtin_ia32_divss_round_mask:
  case X86::BI__builtin_ia32_divsd_round_mask:
  case X86::BI__builtin_ia32_mulsh_round_mask:
  case X86::BI__builtin_ia32_mulss_round_mask:
  case X86::BI__builtin_ia32_mulsd_round_mask:
  case X86::BI__builtin_ia32_subsh_round_mask:
  case X86::BI__builtin_ia32_subss_round_mask:
  case X86::BI__builtin_ia32_subsd_round_mask:
  case X86::BI__builtin_ia32_scalefph512_mask:
  case X86::BI__builtin_ia32_scalefpd512_mask:
  case X86::BI__builtin_ia32_scalefps512_mask:
  case X86::BI__builtin_ia32_scalefsd_round_mask:
  case X86::BI__builtin_ia32_scalefss_round_mask:
  case X86::BI__builtin_ia32_scalefsh_round_mask:
  case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
  case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
  case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
  case X86::BI__builtin_ia32_sqrtsd_round_mask:
  case X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_sqrtsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddsh3_mask:
  case X86::BI__builtin_ia32_vfmaddsh3_maskz:
  case X86::BI__builtin_ia32_vfmaddsh3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddph512_mask:
  case X86::BI__builtin_ia32_vfmaddph512_maskz:
  case X86::BI__builtin_ia32_vfmaddph512_mask3:
  case X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit
  // is set. If the intrinsic has rounding control(bits 1:0), make sure its only
  // combined with ROUND_NO_EXC. If the intrinsic does not have rounding
  // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  // ArgNum = index of the scale immediate for the selected builtin.
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case
X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // The hardware only accepts a scale of 1, 2, 4 or 8.
  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

// AMX tile register numbers are tmm0..tmm7.
enum { TileRegLow = 0, TileRegHigh = 7 };

// Range-check each listed argument as a tile register number.
bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // Because the max number of tile register is TileRegHigh + 1, so here we use
  // each bit to represent the usage of them in bitset.
4723 std::bitset<TileRegHigh + 1> ArgValues; 4724 for (int ArgNum : ArgNums) { 4725 Expr *Arg = TheCall->getArg(ArgNum); 4726 if (Arg->isTypeDependent() || Arg->isValueDependent()) 4727 continue; 4728 4729 llvm::APSInt Result; 4730 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 4731 return true; 4732 int ArgExtValue = Result.getExtValue(); 4733 assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) && 4734 "Incorrect tile register num."); 4735 if (ArgValues.test(ArgExtValue)) 4736 return Diag(TheCall->getBeginLoc(), 4737 diag::err_x86_builtin_tile_arg_duplicate) 4738 << TheCall->getArg(ArgNum)->getSourceRange(); 4739 ArgValues.set(ArgExtValue); 4740 } 4741 return false; 4742 } 4743 4744 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 4745 ArrayRef<int> ArgNums) { 4746 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 4747 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 4748 } 4749 4750 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 4751 switch (BuiltinID) { 4752 default: 4753 return false; 4754 case X86::BI__builtin_ia32_tileloadd64: 4755 case X86::BI__builtin_ia32_tileloaddt164: 4756 case X86::BI__builtin_ia32_tilestored64: 4757 case X86::BI__builtin_ia32_tilezero: 4758 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 4759 case X86::BI__builtin_ia32_tdpbssd: 4760 case X86::BI__builtin_ia32_tdpbsud: 4761 case X86::BI__builtin_ia32_tdpbusd: 4762 case X86::BI__builtin_ia32_tdpbuud: 4763 case X86::BI__builtin_ia32_tdpbf16ps: 4764 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 4765 } 4766 } 4767 static bool isX86_32Builtin(unsigned BuiltinID) { 4768 // These builtins only work on x86-32 targets. 
4769 switch (BuiltinID) { 4770 case X86::BI__builtin_ia32_readeflags_u32: 4771 case X86::BI__builtin_ia32_writeeflags_u32: 4772 return true; 4773 } 4774 4775 return false; 4776 } 4777 4778 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 4779 CallExpr *TheCall) { 4780 if (BuiltinID == X86::BI__builtin_cpu_supports) 4781 return SemaBuiltinCpuSupports(*this, TI, TheCall); 4782 4783 if (BuiltinID == X86::BI__builtin_cpu_is) 4784 return SemaBuiltinCpuIs(*this, TI, TheCall); 4785 4786 // Check for 32-bit only builtins on a 64-bit target. 4787 const llvm::Triple &TT = TI.getTriple(); 4788 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID)) 4789 return Diag(TheCall->getCallee()->getBeginLoc(), 4790 diag::err_32_bit_builtin_64_bit_tgt); 4791 4792 // If the intrinsic has rounding or SAE make sure its valid. 4793 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall)) 4794 return true; 4795 4796 // If the intrinsic has a gather/scatter scale immediate make sure its valid. 4797 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall)) 4798 return true; 4799 4800 // If the intrinsic has a tile arguments, make sure they are valid. 4801 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall)) 4802 return true; 4803 4804 // For intrinsics which take an immediate value as part of the instruction, 4805 // range check them here. 
4806 int i = 0, l = 0, u = 0; 4807 switch (BuiltinID) { 4808 default: 4809 return false; 4810 case X86::BI__builtin_ia32_vec_ext_v2si: 4811 case X86::BI__builtin_ia32_vec_ext_v2di: 4812 case X86::BI__builtin_ia32_vextractf128_pd256: 4813 case X86::BI__builtin_ia32_vextractf128_ps256: 4814 case X86::BI__builtin_ia32_vextractf128_si256: 4815 case X86::BI__builtin_ia32_extract128i256: 4816 case X86::BI__builtin_ia32_extractf64x4_mask: 4817 case X86::BI__builtin_ia32_extracti64x4_mask: 4818 case X86::BI__builtin_ia32_extractf32x8_mask: 4819 case X86::BI__builtin_ia32_extracti32x8_mask: 4820 case X86::BI__builtin_ia32_extractf64x2_256_mask: 4821 case X86::BI__builtin_ia32_extracti64x2_256_mask: 4822 case X86::BI__builtin_ia32_extractf32x4_256_mask: 4823 case X86::BI__builtin_ia32_extracti32x4_256_mask: 4824 i = 1; l = 0; u = 1; 4825 break; 4826 case X86::BI__builtin_ia32_vec_set_v2di: 4827 case X86::BI__builtin_ia32_vinsertf128_pd256: 4828 case X86::BI__builtin_ia32_vinsertf128_ps256: 4829 case X86::BI__builtin_ia32_vinsertf128_si256: 4830 case X86::BI__builtin_ia32_insert128i256: 4831 case X86::BI__builtin_ia32_insertf32x8: 4832 case X86::BI__builtin_ia32_inserti32x8: 4833 case X86::BI__builtin_ia32_insertf64x4: 4834 case X86::BI__builtin_ia32_inserti64x4: 4835 case X86::BI__builtin_ia32_insertf64x2_256: 4836 case X86::BI__builtin_ia32_inserti64x2_256: 4837 case X86::BI__builtin_ia32_insertf32x4_256: 4838 case X86::BI__builtin_ia32_inserti32x4_256: 4839 i = 2; l = 0; u = 1; 4840 break; 4841 case X86::BI__builtin_ia32_vpermilpd: 4842 case X86::BI__builtin_ia32_vec_ext_v4hi: 4843 case X86::BI__builtin_ia32_vec_ext_v4si: 4844 case X86::BI__builtin_ia32_vec_ext_v4sf: 4845 case X86::BI__builtin_ia32_vec_ext_v4di: 4846 case X86::BI__builtin_ia32_extractf32x4_mask: 4847 case X86::BI__builtin_ia32_extracti32x4_mask: 4848 case X86::BI__builtin_ia32_extractf64x2_512_mask: 4849 case X86::BI__builtin_ia32_extracti64x2_512_mask: 4850 i = 1; l = 0; u = 3; 4851 break; 4852 case 
X86::BI_mm_prefetch: 4853 case X86::BI__builtin_ia32_vec_ext_v8hi: 4854 case X86::BI__builtin_ia32_vec_ext_v8si: 4855 i = 1; l = 0; u = 7; 4856 break; 4857 case X86::BI__builtin_ia32_sha1rnds4: 4858 case X86::BI__builtin_ia32_blendpd: 4859 case X86::BI__builtin_ia32_shufpd: 4860 case X86::BI__builtin_ia32_vec_set_v4hi: 4861 case X86::BI__builtin_ia32_vec_set_v4si: 4862 case X86::BI__builtin_ia32_vec_set_v4di: 4863 case X86::BI__builtin_ia32_shuf_f32x4_256: 4864 case X86::BI__builtin_ia32_shuf_f64x2_256: 4865 case X86::BI__builtin_ia32_shuf_i32x4_256: 4866 case X86::BI__builtin_ia32_shuf_i64x2_256: 4867 case X86::BI__builtin_ia32_insertf64x2_512: 4868 case X86::BI__builtin_ia32_inserti64x2_512: 4869 case X86::BI__builtin_ia32_insertf32x4: 4870 case X86::BI__builtin_ia32_inserti32x4: 4871 i = 2; l = 0; u = 3; 4872 break; 4873 case X86::BI__builtin_ia32_vpermil2pd: 4874 case X86::BI__builtin_ia32_vpermil2pd256: 4875 case X86::BI__builtin_ia32_vpermil2ps: 4876 case X86::BI__builtin_ia32_vpermil2ps256: 4877 i = 3; l = 0; u = 3; 4878 break; 4879 case X86::BI__builtin_ia32_cmpb128_mask: 4880 case X86::BI__builtin_ia32_cmpw128_mask: 4881 case X86::BI__builtin_ia32_cmpd128_mask: 4882 case X86::BI__builtin_ia32_cmpq128_mask: 4883 case X86::BI__builtin_ia32_cmpb256_mask: 4884 case X86::BI__builtin_ia32_cmpw256_mask: 4885 case X86::BI__builtin_ia32_cmpd256_mask: 4886 case X86::BI__builtin_ia32_cmpq256_mask: 4887 case X86::BI__builtin_ia32_cmpb512_mask: 4888 case X86::BI__builtin_ia32_cmpw512_mask: 4889 case X86::BI__builtin_ia32_cmpd512_mask: 4890 case X86::BI__builtin_ia32_cmpq512_mask: 4891 case X86::BI__builtin_ia32_ucmpb128_mask: 4892 case X86::BI__builtin_ia32_ucmpw128_mask: 4893 case X86::BI__builtin_ia32_ucmpd128_mask: 4894 case X86::BI__builtin_ia32_ucmpq128_mask: 4895 case X86::BI__builtin_ia32_ucmpb256_mask: 4896 case X86::BI__builtin_ia32_ucmpw256_mask: 4897 case X86::BI__builtin_ia32_ucmpd256_mask: 4898 case X86::BI__builtin_ia32_ucmpq256_mask: 4899 case 
X86::BI__builtin_ia32_ucmpb512_mask: 4900 case X86::BI__builtin_ia32_ucmpw512_mask: 4901 case X86::BI__builtin_ia32_ucmpd512_mask: 4902 case X86::BI__builtin_ia32_ucmpq512_mask: 4903 case X86::BI__builtin_ia32_vpcomub: 4904 case X86::BI__builtin_ia32_vpcomuw: 4905 case X86::BI__builtin_ia32_vpcomud: 4906 case X86::BI__builtin_ia32_vpcomuq: 4907 case X86::BI__builtin_ia32_vpcomb: 4908 case X86::BI__builtin_ia32_vpcomw: 4909 case X86::BI__builtin_ia32_vpcomd: 4910 case X86::BI__builtin_ia32_vpcomq: 4911 case X86::BI__builtin_ia32_vec_set_v8hi: 4912 case X86::BI__builtin_ia32_vec_set_v8si: 4913 i = 2; l = 0; u = 7; 4914 break; 4915 case X86::BI__builtin_ia32_vpermilpd256: 4916 case X86::BI__builtin_ia32_roundps: 4917 case X86::BI__builtin_ia32_roundpd: 4918 case X86::BI__builtin_ia32_roundps256: 4919 case X86::BI__builtin_ia32_roundpd256: 4920 case X86::BI__builtin_ia32_getmantpd128_mask: 4921 case X86::BI__builtin_ia32_getmantpd256_mask: 4922 case X86::BI__builtin_ia32_getmantps128_mask: 4923 case X86::BI__builtin_ia32_getmantps256_mask: 4924 case X86::BI__builtin_ia32_getmantpd512_mask: 4925 case X86::BI__builtin_ia32_getmantps512_mask: 4926 case X86::BI__builtin_ia32_getmantph128_mask: 4927 case X86::BI__builtin_ia32_getmantph256_mask: 4928 case X86::BI__builtin_ia32_getmantph512_mask: 4929 case X86::BI__builtin_ia32_vec_ext_v16qi: 4930 case X86::BI__builtin_ia32_vec_ext_v16hi: 4931 i = 1; l = 0; u = 15; 4932 break; 4933 case X86::BI__builtin_ia32_pblendd128: 4934 case X86::BI__builtin_ia32_blendps: 4935 case X86::BI__builtin_ia32_blendpd256: 4936 case X86::BI__builtin_ia32_shufpd256: 4937 case X86::BI__builtin_ia32_roundss: 4938 case X86::BI__builtin_ia32_roundsd: 4939 case X86::BI__builtin_ia32_rangepd128_mask: 4940 case X86::BI__builtin_ia32_rangepd256_mask: 4941 case X86::BI__builtin_ia32_rangepd512_mask: 4942 case X86::BI__builtin_ia32_rangeps128_mask: 4943 case X86::BI__builtin_ia32_rangeps256_mask: 4944 case X86::BI__builtin_ia32_rangeps512_mask: 4945 case 
X86::BI__builtin_ia32_getmantsd_round_mask: 4946 case X86::BI__builtin_ia32_getmantss_round_mask: 4947 case X86::BI__builtin_ia32_getmantsh_round_mask: 4948 case X86::BI__builtin_ia32_vec_set_v16qi: 4949 case X86::BI__builtin_ia32_vec_set_v16hi: 4950 i = 2; l = 0; u = 15; 4951 break; 4952 case X86::BI__builtin_ia32_vec_ext_v32qi: 4953 i = 1; l = 0; u = 31; 4954 break; 4955 case X86::BI__builtin_ia32_cmpps: 4956 case X86::BI__builtin_ia32_cmpss: 4957 case X86::BI__builtin_ia32_cmppd: 4958 case X86::BI__builtin_ia32_cmpsd: 4959 case X86::BI__builtin_ia32_cmpps256: 4960 case X86::BI__builtin_ia32_cmppd256: 4961 case X86::BI__builtin_ia32_cmpps128_mask: 4962 case X86::BI__builtin_ia32_cmppd128_mask: 4963 case X86::BI__builtin_ia32_cmpps256_mask: 4964 case X86::BI__builtin_ia32_cmppd256_mask: 4965 case X86::BI__builtin_ia32_cmpps512_mask: 4966 case X86::BI__builtin_ia32_cmppd512_mask: 4967 case X86::BI__builtin_ia32_cmpsd_mask: 4968 case X86::BI__builtin_ia32_cmpss_mask: 4969 case X86::BI__builtin_ia32_vec_set_v32qi: 4970 i = 2; l = 0; u = 31; 4971 break; 4972 case X86::BI__builtin_ia32_permdf256: 4973 case X86::BI__builtin_ia32_permdi256: 4974 case X86::BI__builtin_ia32_permdf512: 4975 case X86::BI__builtin_ia32_permdi512: 4976 case X86::BI__builtin_ia32_vpermilps: 4977 case X86::BI__builtin_ia32_vpermilps256: 4978 case X86::BI__builtin_ia32_vpermilpd512: 4979 case X86::BI__builtin_ia32_vpermilps512: 4980 case X86::BI__builtin_ia32_pshufd: 4981 case X86::BI__builtin_ia32_pshufd256: 4982 case X86::BI__builtin_ia32_pshufd512: 4983 case X86::BI__builtin_ia32_pshufhw: 4984 case X86::BI__builtin_ia32_pshufhw256: 4985 case X86::BI__builtin_ia32_pshufhw512: 4986 case X86::BI__builtin_ia32_pshuflw: 4987 case X86::BI__builtin_ia32_pshuflw256: 4988 case X86::BI__builtin_ia32_pshuflw512: 4989 case X86::BI__builtin_ia32_vcvtps2ph: 4990 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4991 case X86::BI__builtin_ia32_vcvtps2ph256: 4992 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4993 
case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4994 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4995 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4996 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4997 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4998 case X86::BI__builtin_ia32_rndscaleps_mask: 4999 case X86::BI__builtin_ia32_rndscalepd_mask: 5000 case X86::BI__builtin_ia32_rndscaleph_mask: 5001 case X86::BI__builtin_ia32_reducepd128_mask: 5002 case X86::BI__builtin_ia32_reducepd256_mask: 5003 case X86::BI__builtin_ia32_reducepd512_mask: 5004 case X86::BI__builtin_ia32_reduceps128_mask: 5005 case X86::BI__builtin_ia32_reduceps256_mask: 5006 case X86::BI__builtin_ia32_reduceps512_mask: 5007 case X86::BI__builtin_ia32_reduceph128_mask: 5008 case X86::BI__builtin_ia32_reduceph256_mask: 5009 case X86::BI__builtin_ia32_reduceph512_mask: 5010 case X86::BI__builtin_ia32_prold512: 5011 case X86::BI__builtin_ia32_prolq512: 5012 case X86::BI__builtin_ia32_prold128: 5013 case X86::BI__builtin_ia32_prold256: 5014 case X86::BI__builtin_ia32_prolq128: 5015 case X86::BI__builtin_ia32_prolq256: 5016 case X86::BI__builtin_ia32_prord512: 5017 case X86::BI__builtin_ia32_prorq512: 5018 case X86::BI__builtin_ia32_prord128: 5019 case X86::BI__builtin_ia32_prord256: 5020 case X86::BI__builtin_ia32_prorq128: 5021 case X86::BI__builtin_ia32_prorq256: 5022 case X86::BI__builtin_ia32_fpclasspd128_mask: 5023 case X86::BI__builtin_ia32_fpclasspd256_mask: 5024 case X86::BI__builtin_ia32_fpclassps128_mask: 5025 case X86::BI__builtin_ia32_fpclassps256_mask: 5026 case X86::BI__builtin_ia32_fpclassps512_mask: 5027 case X86::BI__builtin_ia32_fpclasspd512_mask: 5028 case X86::BI__builtin_ia32_fpclassph128_mask: 5029 case X86::BI__builtin_ia32_fpclassph256_mask: 5030 case X86::BI__builtin_ia32_fpclassph512_mask: 5031 case X86::BI__builtin_ia32_fpclasssd_mask: 5032 case X86::BI__builtin_ia32_fpclassss_mask: 5033 case X86::BI__builtin_ia32_fpclasssh_mask: 5034 case 
X86::BI__builtin_ia32_pslldqi128_byteshift: 5035 case X86::BI__builtin_ia32_pslldqi256_byteshift: 5036 case X86::BI__builtin_ia32_pslldqi512_byteshift: 5037 case X86::BI__builtin_ia32_psrldqi128_byteshift: 5038 case X86::BI__builtin_ia32_psrldqi256_byteshift: 5039 case X86::BI__builtin_ia32_psrldqi512_byteshift: 5040 case X86::BI__builtin_ia32_kshiftliqi: 5041 case X86::BI__builtin_ia32_kshiftlihi: 5042 case X86::BI__builtin_ia32_kshiftlisi: 5043 case X86::BI__builtin_ia32_kshiftlidi: 5044 case X86::BI__builtin_ia32_kshiftriqi: 5045 case X86::BI__builtin_ia32_kshiftrihi: 5046 case X86::BI__builtin_ia32_kshiftrisi: 5047 case X86::BI__builtin_ia32_kshiftridi: 5048 i = 1; l = 0; u = 255; 5049 break; 5050 case X86::BI__builtin_ia32_vperm2f128_pd256: 5051 case X86::BI__builtin_ia32_vperm2f128_ps256: 5052 case X86::BI__builtin_ia32_vperm2f128_si256: 5053 case X86::BI__builtin_ia32_permti256: 5054 case X86::BI__builtin_ia32_pblendw128: 5055 case X86::BI__builtin_ia32_pblendw256: 5056 case X86::BI__builtin_ia32_blendps256: 5057 case X86::BI__builtin_ia32_pblendd256: 5058 case X86::BI__builtin_ia32_palignr128: 5059 case X86::BI__builtin_ia32_palignr256: 5060 case X86::BI__builtin_ia32_palignr512: 5061 case X86::BI__builtin_ia32_alignq512: 5062 case X86::BI__builtin_ia32_alignd512: 5063 case X86::BI__builtin_ia32_alignd128: 5064 case X86::BI__builtin_ia32_alignd256: 5065 case X86::BI__builtin_ia32_alignq128: 5066 case X86::BI__builtin_ia32_alignq256: 5067 case X86::BI__builtin_ia32_vcomisd: 5068 case X86::BI__builtin_ia32_vcomiss: 5069 case X86::BI__builtin_ia32_shuf_f32x4: 5070 case X86::BI__builtin_ia32_shuf_f64x2: 5071 case X86::BI__builtin_ia32_shuf_i32x4: 5072 case X86::BI__builtin_ia32_shuf_i64x2: 5073 case X86::BI__builtin_ia32_shufpd512: 5074 case X86::BI__builtin_ia32_shufps: 5075 case X86::BI__builtin_ia32_shufps256: 5076 case X86::BI__builtin_ia32_shufps512: 5077 case X86::BI__builtin_ia32_dbpsadbw128: 5078 case X86::BI__builtin_ia32_dbpsadbw256: 5079 case 
X86::BI__builtin_ia32_dbpsadbw512: 5080 case X86::BI__builtin_ia32_vpshldd128: 5081 case X86::BI__builtin_ia32_vpshldd256: 5082 case X86::BI__builtin_ia32_vpshldd512: 5083 case X86::BI__builtin_ia32_vpshldq128: 5084 case X86::BI__builtin_ia32_vpshldq256: 5085 case X86::BI__builtin_ia32_vpshldq512: 5086 case X86::BI__builtin_ia32_vpshldw128: 5087 case X86::BI__builtin_ia32_vpshldw256: 5088 case X86::BI__builtin_ia32_vpshldw512: 5089 case X86::BI__builtin_ia32_vpshrdd128: 5090 case X86::BI__builtin_ia32_vpshrdd256: 5091 case X86::BI__builtin_ia32_vpshrdd512: 5092 case X86::BI__builtin_ia32_vpshrdq128: 5093 case X86::BI__builtin_ia32_vpshrdq256: 5094 case X86::BI__builtin_ia32_vpshrdq512: 5095 case X86::BI__builtin_ia32_vpshrdw128: 5096 case X86::BI__builtin_ia32_vpshrdw256: 5097 case X86::BI__builtin_ia32_vpshrdw512: 5098 i = 2; l = 0; u = 255; 5099 break; 5100 case X86::BI__builtin_ia32_fixupimmpd512_mask: 5101 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 5102 case X86::BI__builtin_ia32_fixupimmps512_mask: 5103 case X86::BI__builtin_ia32_fixupimmps512_maskz: 5104 case X86::BI__builtin_ia32_fixupimmsd_mask: 5105 case X86::BI__builtin_ia32_fixupimmsd_maskz: 5106 case X86::BI__builtin_ia32_fixupimmss_mask: 5107 case X86::BI__builtin_ia32_fixupimmss_maskz: 5108 case X86::BI__builtin_ia32_fixupimmpd128_mask: 5109 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 5110 case X86::BI__builtin_ia32_fixupimmpd256_mask: 5111 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 5112 case X86::BI__builtin_ia32_fixupimmps128_mask: 5113 case X86::BI__builtin_ia32_fixupimmps128_maskz: 5114 case X86::BI__builtin_ia32_fixupimmps256_mask: 5115 case X86::BI__builtin_ia32_fixupimmps256_maskz: 5116 case X86::BI__builtin_ia32_pternlogd512_mask: 5117 case X86::BI__builtin_ia32_pternlogd512_maskz: 5118 case X86::BI__builtin_ia32_pternlogq512_mask: 5119 case X86::BI__builtin_ia32_pternlogq512_maskz: 5120 case X86::BI__builtin_ia32_pternlogd128_mask: 5121 case 
X86::BI__builtin_ia32_pternlogd128_maskz: 5122 case X86::BI__builtin_ia32_pternlogd256_mask: 5123 case X86::BI__builtin_ia32_pternlogd256_maskz: 5124 case X86::BI__builtin_ia32_pternlogq128_mask: 5125 case X86::BI__builtin_ia32_pternlogq128_maskz: 5126 case X86::BI__builtin_ia32_pternlogq256_mask: 5127 case X86::BI__builtin_ia32_pternlogq256_maskz: 5128 i = 3; l = 0; u = 255; 5129 break; 5130 case X86::BI__builtin_ia32_gatherpfdpd: 5131 case X86::BI__builtin_ia32_gatherpfdps: 5132 case X86::BI__builtin_ia32_gatherpfqpd: 5133 case X86::BI__builtin_ia32_gatherpfqps: 5134 case X86::BI__builtin_ia32_scatterpfdpd: 5135 case X86::BI__builtin_ia32_scatterpfdps: 5136 case X86::BI__builtin_ia32_scatterpfqpd: 5137 case X86::BI__builtin_ia32_scatterpfqps: 5138 i = 4; l = 2; u = 3; 5139 break; 5140 case X86::BI__builtin_ia32_reducesd_mask: 5141 case X86::BI__builtin_ia32_reducess_mask: 5142 case X86::BI__builtin_ia32_rndscalesd_round_mask: 5143 case X86::BI__builtin_ia32_rndscaless_round_mask: 5144 case X86::BI__builtin_ia32_rndscalesh_round_mask: 5145 case X86::BI__builtin_ia32_reducesh_mask: 5146 i = 4; l = 0; u = 255; 5147 break; 5148 } 5149 5150 // Note that we don't force a hard error on the range check here, allowing 5151 // template-generated or macro-generated dead code to potentially have out-of- 5152 // range values. These need to code generate, but don't need to necessarily 5153 // make any sense. We use a warning that defaults to an error. 5154 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 5155 } 5156 5157 /// Given a FunctionDecl's FormatAttr, attempts to populate the FomatStringInfo 5158 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 5159 /// Returns true when the format fits the function and the FormatStringInfo has 5160 /// been populated. 
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  // A first-arg of 0 in the attribute means the callee takes a va_list rather
  // than variadic arguments.
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  // Attribute indices are 1-based; convert to 0-based AST argument indices.
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
  if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
    if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
      if (const CompoundLiteralExpr *CLE =
          dyn_cast<CompoundLiteralExpr>(Expr))
        if (const InitListExpr *ILE =
            dyn_cast<InitListExpr>(CLE->getInitializer()))
          // Evaluate the first member of the transparent union instead.
          Expr = ILE->getInit(0);
  }

  // Null iff the expression constant-evaluates to a false boolean condition.
  bool Result;
  return (!Expr->isValueDependent() &&
          Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
          !Result);
}

/// Emit a -Wnonnull runtime-behavior warning if \p ArgExpr evaluates to null.
static void CheckNonNullArgument(Sema &S,
                                 const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc) {
  if (CheckNonNullExpr(S, ArgExpr))
    S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
                          S.PDiag(diag::warn_null_arg)
                            << ArgExpr->getSourceRange());
}

/// If \p Format describes an NSString format attribute, return true and set
/// \p Idx to its zero-based format-string argument index.
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
  FormatStringInfo FSI;
  if ((GetFormatStringType(Format) == FST_NSString) &&
      getFormatStringInfo(Format, false, &FSI)) {
    Idx = FSI.FormatIdx;
    return true;
  }
  return false;
}

/// Diagnose use of %s directive in an NSString which is being passed
/// as formatting string to formatting method.
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  // CFString-formatting functions take the format string as argument 2;
  // otherwise look for an NSString format attribute on the declaration.
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
      dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(ASTContext &ctx, QualType type) {
  if (auto nullability = type->getNullability(ctx))
    return *nullability == NullabilityKind::NonNull;

  return false;
}

/// Check the call arguments against all sources of non-null information:
/// nonnull attributes on the declaration, nonnull attributes on parameters,
/// and _Nonnull annotations on the parameter types.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      // Record the argument indices the attribute names; out-of-range
      // indices (e.g. variadic positions in malformed code) are skipped.
      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() ||
          isNonNullType(S.Context, PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(S.Context, paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
  }
}

/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
                             StringRef ParamName, QualType ArgTy,
                             QualType ParamTy) {

  // If a function accepts a pointer or reference type
  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
    return;

  // If the parameter is a pointer type, get the pointee type for the
  // argument too. If the parameter is a reference type, don't try to get
  // the pointee type for the argument.
  if (ParamTy->isPointerType())
    ArgTy = ArgTy->getPointeeType();

  // Remove reference or pointer
  ParamTy = ParamTy->getPointeeType();

  // Find expected alignment, and the actual alignment of the passed object.
  // getTypeAlignInChars requires complete types
  if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
      ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
      ArgTy->isUndeducedType())
    return;

  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);

  // If the argument is less aligned than the parameter, there is a
  // potential alignment issue.
  if (ArgAlign < ParamAlign)
    Diag(Loc, diag::warn_param_mismatched_alignment)
        << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
        << ParamName << (FDecl != nullptr) << FDecl;
}

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Number of fixed parameters; everything beyond this index is variadic.
    unsigned NumParams = Proto ? Proto->getNumParams()
                       : FDecl && isa<FunctionDecl>(FDecl)
                           ? cast<FunctionDecl>(FDecl)->getNumParams()
                       : FDecl && isa<ObjCMethodDecl>(FDecl)
                           ? cast<ObjCMethodDecl>(FDecl)->param_size()
                       : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already vetted by format-string checking above.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        // Parameter name in diagnostics is the 1-based argument position.
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  // Validate the alloc_align attribute's alignment argument: it must be a
  // power of two and not exceed the implementation's maximum alignment.
  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  // Also verify the alignment of the implicit 'this' object.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall) {
    // If this is a call to a member operator, hide the first argument
    // from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction)
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    // Diagnose an under-aligned object passed as 'this'.
    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    // Not a recognized memory function; nothing more to check.
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}

/// Check an Objective-C message send for correctness properties not enforced
/// by the type system (format strings, nonnull arguments, TCB, etc.).
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                              ArrayRef<const Expr *> Args) {
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
            CallType);

  CheckTCBEnforcement(lbrac, Method);

  return false;
}

/// Check a call through a variable or field of function-pointer, block, or
/// function-prototype type.
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  QualType Ty;
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();
  else
    return false;

  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())
    return false;

  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;
  }

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Return true if \p Ordering is a memory ordering the C ABI permits for the
/// atomic operation \p Op (e.g. loads cannot use release orderings, stores
/// cannot use acquire/consume orderings).
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    // Loads may not have release semantics.
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    // Stores may not have acquire or consume semantics.
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}

/// Entry point for checking an overloaded atomic builtin call; unpacks the
/// CallExpr and forwards to BuildAtomicExpr.
ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
  return
BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5711 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5712 Op); 5713 } 5714 5715 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5716 SourceLocation RParenLoc, MultiExprArg Args, 5717 AtomicExpr::AtomicOp Op, 5718 AtomicArgumentOrder ArgOrder) { 5719 // All the non-OpenCL operations take one of the following forms. 5720 // The OpenCL operations take the __c11 forms with one extra argument for 5721 // synchronization scope. 5722 enum { 5723 // C __c11_atomic_init(A *, C) 5724 Init, 5725 5726 // C __c11_atomic_load(A *, int) 5727 Load, 5728 5729 // void __atomic_load(A *, CP, int) 5730 LoadCopy, 5731 5732 // void __atomic_store(A *, CP, int) 5733 Copy, 5734 5735 // C __c11_atomic_add(A *, M, int) 5736 Arithmetic, 5737 5738 // C __atomic_exchange_n(A *, CP, int) 5739 Xchg, 5740 5741 // void __atomic_exchange(A *, C *, CP, int) 5742 GNUXchg, 5743 5744 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5745 C11CmpXchg, 5746 5747 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5748 GNUCmpXchg 5749 } Form = Init; 5750 5751 const unsigned NumForm = GNUCmpXchg + 1; 5752 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5753 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5754 // where: 5755 // C is an appropriate type, 5756 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5757 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5758 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5759 // the int parameters are for orderings. 
5760 5761 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5762 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5763 "need to update code for modified forms"); 5764 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5765 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5766 AtomicExpr::AO__atomic_load, 5767 "need to update code for modified C11 atomics"); 5768 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5769 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5770 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && 5771 Op <= AtomicExpr::AO__hip_atomic_fetch_max; 5772 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5773 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5774 IsOpenCL; 5775 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5776 Op == AtomicExpr::AO__atomic_store_n || 5777 Op == AtomicExpr::AO__atomic_exchange_n || 5778 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5779 bool IsAddSub = false; 5780 5781 switch (Op) { 5782 case AtomicExpr::AO__c11_atomic_init: 5783 case AtomicExpr::AO__opencl_atomic_init: 5784 Form = Init; 5785 break; 5786 5787 case AtomicExpr::AO__c11_atomic_load: 5788 case AtomicExpr::AO__opencl_atomic_load: 5789 case AtomicExpr::AO__hip_atomic_load: 5790 case AtomicExpr::AO__atomic_load_n: 5791 Form = Load; 5792 break; 5793 5794 case AtomicExpr::AO__atomic_load: 5795 Form = LoadCopy; 5796 break; 5797 5798 case AtomicExpr::AO__c11_atomic_store: 5799 case AtomicExpr::AO__opencl_atomic_store: 5800 case AtomicExpr::AO__hip_atomic_store: 5801 case AtomicExpr::AO__atomic_store: 5802 case AtomicExpr::AO__atomic_store_n: 5803 Form = Copy; 5804 break; 5805 case AtomicExpr::AO__hip_atomic_fetch_add: 5806 case AtomicExpr::AO__hip_atomic_fetch_min: 5807 case AtomicExpr::AO__hip_atomic_fetch_max: 5808 case AtomicExpr::AO__c11_atomic_fetch_add: 5809 case AtomicExpr::AO__c11_atomic_fetch_sub: 5810 case AtomicExpr::AO__opencl_atomic_fetch_add: 5811 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5812 case 
AtomicExpr::AO__atomic_fetch_add: 5813 case AtomicExpr::AO__atomic_fetch_sub: 5814 case AtomicExpr::AO__atomic_add_fetch: 5815 case AtomicExpr::AO__atomic_sub_fetch: 5816 IsAddSub = true; 5817 Form = Arithmetic; 5818 break; 5819 case AtomicExpr::AO__c11_atomic_fetch_and: 5820 case AtomicExpr::AO__c11_atomic_fetch_or: 5821 case AtomicExpr::AO__c11_atomic_fetch_xor: 5822 case AtomicExpr::AO__hip_atomic_fetch_and: 5823 case AtomicExpr::AO__hip_atomic_fetch_or: 5824 case AtomicExpr::AO__hip_atomic_fetch_xor: 5825 case AtomicExpr::AO__c11_atomic_fetch_nand: 5826 case AtomicExpr::AO__opencl_atomic_fetch_and: 5827 case AtomicExpr::AO__opencl_atomic_fetch_or: 5828 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5829 case AtomicExpr::AO__atomic_fetch_and: 5830 case AtomicExpr::AO__atomic_fetch_or: 5831 case AtomicExpr::AO__atomic_fetch_xor: 5832 case AtomicExpr::AO__atomic_fetch_nand: 5833 case AtomicExpr::AO__atomic_and_fetch: 5834 case AtomicExpr::AO__atomic_or_fetch: 5835 case AtomicExpr::AO__atomic_xor_fetch: 5836 case AtomicExpr::AO__atomic_nand_fetch: 5837 Form = Arithmetic; 5838 break; 5839 case AtomicExpr::AO__c11_atomic_fetch_min: 5840 case AtomicExpr::AO__c11_atomic_fetch_max: 5841 case AtomicExpr::AO__opencl_atomic_fetch_min: 5842 case AtomicExpr::AO__opencl_atomic_fetch_max: 5843 case AtomicExpr::AO__atomic_min_fetch: 5844 case AtomicExpr::AO__atomic_max_fetch: 5845 case AtomicExpr::AO__atomic_fetch_min: 5846 case AtomicExpr::AO__atomic_fetch_max: 5847 Form = Arithmetic; 5848 break; 5849 5850 case AtomicExpr::AO__c11_atomic_exchange: 5851 case AtomicExpr::AO__hip_atomic_exchange: 5852 case AtomicExpr::AO__opencl_atomic_exchange: 5853 case AtomicExpr::AO__atomic_exchange_n: 5854 Form = Xchg; 5855 break; 5856 5857 case AtomicExpr::AO__atomic_exchange: 5858 Form = GNUXchg; 5859 break; 5860 5861 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5862 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5863 case 
AtomicExpr::AO__hip_atomic_compare_exchange_strong: 5864 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5865 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5866 case AtomicExpr::AO__hip_atomic_compare_exchange_weak: 5867 Form = C11CmpXchg; 5868 break; 5869 5870 case AtomicExpr::AO__atomic_compare_exchange: 5871 case AtomicExpr::AO__atomic_compare_exchange_n: 5872 Form = GNUCmpXchg; 5873 break; 5874 } 5875 5876 unsigned AdjustedNumArgs = NumArgs[Form]; 5877 if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init) 5878 ++AdjustedNumArgs; 5879 // Check we have the right number of arguments. 5880 if (Args.size() < AdjustedNumArgs) { 5881 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5882 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5883 << ExprRange; 5884 return ExprError(); 5885 } else if (Args.size() > AdjustedNumArgs) { 5886 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5887 diag::err_typecheck_call_too_many_args) 5888 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5889 << ExprRange; 5890 return ExprError(); 5891 } 5892 5893 // Inspect the first argument of the atomic operation. 5894 Expr *Ptr = Args[0]; 5895 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5896 if (ConvertedPtr.isInvalid()) 5897 return ExprError(); 5898 5899 Ptr = ConvertedPtr.get(); 5900 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5901 if (!pointerType) { 5902 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5903 << Ptr->getType() << Ptr->getSourceRange(); 5904 return ExprError(); 5905 } 5906 5907 // For a __c11 builtin, this should be a pointer to an _Atomic type. 
5908 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5909 QualType ValType = AtomTy; // 'C' 5910 if (IsC11) { 5911 if (!AtomTy->isAtomicType()) { 5912 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5913 << Ptr->getType() << Ptr->getSourceRange(); 5914 return ExprError(); 5915 } 5916 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5917 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5918 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5919 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType() 5920 << Ptr->getSourceRange(); 5921 return ExprError(); 5922 } 5923 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5924 } else if (Form != Load && Form != LoadCopy) { 5925 if (ValType.isConstQualified()) { 5926 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5927 << Ptr->getType() << Ptr->getSourceRange(); 5928 return ExprError(); 5929 } 5930 } 5931 5932 // For an arithmetic operation, the implied arithmetic must be well-formed. 5933 if (Form == Arithmetic) { 5934 // GCC does not enforce these rules for GNU atomics, but we do to help catch 5935 // trivial type errors. 5936 auto IsAllowedValueType = [&](QualType ValType) { 5937 if (ValType->isIntegerType()) 5938 return true; 5939 if (ValType->isPointerType()) 5940 return true; 5941 if (!ValType->isFloatingType()) 5942 return false; 5943 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 
5944 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5945 &Context.getTargetInfo().getLongDoubleFormat() == 5946 &llvm::APFloat::x87DoubleExtended()) 5947 return false; 5948 return true; 5949 }; 5950 if (IsAddSub && !IsAllowedValueType(ValType)) { 5951 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5952 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5953 return ExprError(); 5954 } 5955 if (!IsAddSub && !ValType->isIntegerType()) { 5956 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5957 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5958 return ExprError(); 5959 } 5960 if (IsC11 && ValType->isPointerType() && 5961 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5962 diag::err_incomplete_type)) { 5963 return ExprError(); 5964 } 5965 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5966 // For __atomic_*_n operations, the value type must be a scalar integral or 5967 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5968 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5969 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5970 return ExprError(); 5971 } 5972 5973 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5974 !AtomTy->isScalarType()) { 5975 // For GNU atomics, require a trivially-copyable type. This is not part of 5976 // the GNU atomics specification but we enforce it for consistency with 5977 // other atomics which generally all require a trivially-copyable type. This 5978 // is because atomics just copy bits. 
5979 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5980 << Ptr->getType() << Ptr->getSourceRange(); 5981 return ExprError(); 5982 } 5983 5984 switch (ValType.getObjCLifetime()) { 5985 case Qualifiers::OCL_None: 5986 case Qualifiers::OCL_ExplicitNone: 5987 // okay 5988 break; 5989 5990 case Qualifiers::OCL_Weak: 5991 case Qualifiers::OCL_Strong: 5992 case Qualifiers::OCL_Autoreleasing: 5993 // FIXME: Can this happen? By this point, ValType should be known 5994 // to be trivially copyable. 5995 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5996 << ValType << Ptr->getSourceRange(); 5997 return ExprError(); 5998 } 5999 6000 // All atomic operations have an overload which takes a pointer to a volatile 6001 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 6002 // into the result or the other operands. Similarly atomic_load takes a 6003 // pointer to a const 'A'. 6004 ValType.removeLocalVolatile(); 6005 ValType.removeLocalConst(); 6006 QualType ResultType = ValType; 6007 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 6008 Form == Init) 6009 ResultType = Context.VoidTy; 6010 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 6011 ResultType = Context.BoolTy; 6012 6013 // The type of a parameter passed 'by value'. In the GNU atomics, such 6014 // arguments are actually passed as pointers. 
6015 QualType ByValType = ValType; // 'CP' 6016 bool IsPassedByAddress = false; 6017 if (!IsC11 && !IsHIP && !IsN) { 6018 ByValType = Ptr->getType(); 6019 IsPassedByAddress = true; 6020 } 6021 6022 SmallVector<Expr *, 5> APIOrderedArgs; 6023 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 6024 APIOrderedArgs.push_back(Args[0]); 6025 switch (Form) { 6026 case Init: 6027 case Load: 6028 APIOrderedArgs.push_back(Args[1]); // Val1/Order 6029 break; 6030 case LoadCopy: 6031 case Copy: 6032 case Arithmetic: 6033 case Xchg: 6034 APIOrderedArgs.push_back(Args[2]); // Val1 6035 APIOrderedArgs.push_back(Args[1]); // Order 6036 break; 6037 case GNUXchg: 6038 APIOrderedArgs.push_back(Args[2]); // Val1 6039 APIOrderedArgs.push_back(Args[3]); // Val2 6040 APIOrderedArgs.push_back(Args[1]); // Order 6041 break; 6042 case C11CmpXchg: 6043 APIOrderedArgs.push_back(Args[2]); // Val1 6044 APIOrderedArgs.push_back(Args[4]); // Val2 6045 APIOrderedArgs.push_back(Args[1]); // Order 6046 APIOrderedArgs.push_back(Args[3]); // OrderFail 6047 break; 6048 case GNUCmpXchg: 6049 APIOrderedArgs.push_back(Args[2]); // Val1 6050 APIOrderedArgs.push_back(Args[4]); // Val2 6051 APIOrderedArgs.push_back(Args[5]); // Weak 6052 APIOrderedArgs.push_back(Args[1]); // Order 6053 APIOrderedArgs.push_back(Args[3]); // OrderFail 6054 break; 6055 } 6056 } else 6057 APIOrderedArgs.append(Args.begin(), Args.end()); 6058 6059 // The first argument's non-CV pointer type is used to deduce the type of 6060 // subsequent arguments, except for: 6061 // - weak flag (always converted to bool) 6062 // - memory order (always converted to int) 6063 // - scope (always converted to int) 6064 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 6065 QualType Ty; 6066 if (i < NumVals[Form] + 1) { 6067 switch (i) { 6068 case 0: 6069 // The first argument is always a pointer. It has a fixed type. 6070 // It is always dereferenced, a nullptr is undefined. 
6071 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6072 // Nothing else to do: we already know all we want about this pointer. 6073 continue; 6074 case 1: 6075 // The second argument is the non-atomic operand. For arithmetic, this 6076 // is always passed by value, and for a compare_exchange it is always 6077 // passed by address. For the rest, GNU uses by-address and C11 uses 6078 // by-value. 6079 assert(Form != Load); 6080 if (Form == Arithmetic && ValType->isPointerType()) 6081 Ty = Context.getPointerDiffType(); 6082 else if (Form == Init || Form == Arithmetic) 6083 Ty = ValType; 6084 else if (Form == Copy || Form == Xchg) { 6085 if (IsPassedByAddress) { 6086 // The value pointer is always dereferenced, a nullptr is undefined. 6087 CheckNonNullArgument(*this, APIOrderedArgs[i], 6088 ExprRange.getBegin()); 6089 } 6090 Ty = ByValType; 6091 } else { 6092 Expr *ValArg = APIOrderedArgs[i]; 6093 // The value pointer is always dereferenced, a nullptr is undefined. 6094 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 6095 LangAS AS = LangAS::Default; 6096 // Keep address space of non-atomic pointer type. 6097 if (const PointerType *PtrTy = 6098 ValArg->getType()->getAs<PointerType>()) { 6099 AS = PtrTy->getPointeeType().getAddressSpace(); 6100 } 6101 Ty = Context.getPointerType( 6102 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 6103 } 6104 break; 6105 case 2: 6106 // The third argument to compare_exchange / GNU exchange is the desired 6107 // value, either by-value (for the C11 and *_n variant) or as a pointer. 6108 if (IsPassedByAddress) 6109 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 6110 Ty = ByValType; 6111 break; 6112 case 3: 6113 // The fourth argument to GNU compare_exchange is a 'weak' flag. 6114 Ty = Context.BoolTy; 6115 break; 6116 } 6117 } else { 6118 // The order(s) and scope are always converted to int. 
6119 Ty = Context.IntTy; 6120 } 6121 6122 InitializedEntity Entity = 6123 InitializedEntity::InitializeParameter(Context, Ty, false); 6124 ExprResult Arg = APIOrderedArgs[i]; 6125 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6126 if (Arg.isInvalid()) 6127 return true; 6128 APIOrderedArgs[i] = Arg.get(); 6129 } 6130 6131 // Permute the arguments into a 'consistent' order. 6132 SmallVector<Expr*, 5> SubExprs; 6133 SubExprs.push_back(Ptr); 6134 switch (Form) { 6135 case Init: 6136 // Note, AtomicExpr::getVal1() has a special case for this atomic. 6137 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6138 break; 6139 case Load: 6140 SubExprs.push_back(APIOrderedArgs[1]); // Order 6141 break; 6142 case LoadCopy: 6143 case Copy: 6144 case Arithmetic: 6145 case Xchg: 6146 SubExprs.push_back(APIOrderedArgs[2]); // Order 6147 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6148 break; 6149 case GNUXchg: 6150 // Note, AtomicExpr::getVal2() has a special case for this atomic. 6151 SubExprs.push_back(APIOrderedArgs[3]); // Order 6152 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6153 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6154 break; 6155 case C11CmpXchg: 6156 SubExprs.push_back(APIOrderedArgs[3]); // Order 6157 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6158 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 6159 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6160 break; 6161 case GNUCmpXchg: 6162 SubExprs.push_back(APIOrderedArgs[4]); // Order 6163 SubExprs.push_back(APIOrderedArgs[1]); // Val1 6164 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 6165 SubExprs.push_back(APIOrderedArgs[2]); // Val2 6166 SubExprs.push_back(APIOrderedArgs[3]); // Weak 6167 break; 6168 } 6169 6170 if (SubExprs.size() >= 2 && Form != Init) { 6171 if (Optional<llvm::APSInt> Result = 6172 SubExprs[1]->getIntegerConstantExpr(Context)) 6173 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 6174 Diag(SubExprs[1]->getBeginLoc(), 6175 
diag::warn_atomic_op_has_invalid_memory_order) 6176 << SubExprs[1]->getSourceRange(); 6177 } 6178 6179 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 6180 auto *Scope = Args[Args.size() - 1]; 6181 if (Optional<llvm::APSInt> Result = 6182 Scope->getIntegerConstantExpr(Context)) { 6183 if (!ScopeModel->isValid(Result->getZExtValue())) 6184 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 6185 << Scope->getSourceRange(); 6186 } 6187 SubExprs.push_back(Scope); 6188 } 6189 6190 AtomicExpr *AE = new (Context) 6191 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 6192 6193 if ((Op == AtomicExpr::AO__c11_atomic_load || 6194 Op == AtomicExpr::AO__c11_atomic_store || 6195 Op == AtomicExpr::AO__opencl_atomic_load || 6196 Op == AtomicExpr::AO__hip_atomic_load || 6197 Op == AtomicExpr::AO__opencl_atomic_store || 6198 Op == AtomicExpr::AO__hip_atomic_store) && 6199 Context.AtomicUsesUnsupportedLibcall(AE)) 6200 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 6201 << ((Op == AtomicExpr::AO__c11_atomic_load || 6202 Op == AtomicExpr::AO__opencl_atomic_load || 6203 Op == AtomicExpr::AO__hip_atomic_load) 6204 ? 0 6205 : 1); 6206 6207 if (ValType->isBitIntType()) { 6208 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit); 6209 return ExprError(); 6210 } 6211 6212 return AE; 6213 } 6214 6215 /// checkBuiltinArgument - Given a call to a builtin function, perform 6216 /// normal type-checking on the given argument, updating the call in 6217 /// place. This is useful when a builtin function requires custom 6218 /// type-checking for some of its arguments but not necessarily all of 6219 /// them. 6220 /// 6221 /// Returns true on error. 
static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
  FunctionDecl *Fn = E->getDirectCallee();
  assert(Fn && "builtin call without direct callee!");

  // Copy-initialize the argument against the builtin's declared parameter,
  // exactly as ordinary call checking would.
  ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
  InitializedEntity Entity =
    InitializedEntity::InitializeParameter(S.Context, Param);

  ExprResult Arg = E->getArg(ArgIndex);
  Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
  if (Arg.isInvalid())
    return true;

  // Write the converted argument back into the call.
  E->setArg(ArgIndex, Arg.get());
  return false;
}

/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
/// arguments because all of these calls are prototyped as void(...).
///
/// This function goes through and does final semantic checking for these
/// builtins, as well as generating any warnings.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
  Expr *Callee = TheCall->getCallee();
  DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin.  This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  if (ValType.isConstQualified()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    // ARC-managed pointees cannot be updated with a raw bitwise atomic op.
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto.  For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
        << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number of
  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
  // that we ignore.  Find out which row of BuiltinIndices to read from as well
  // as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    // GCC 4.4 changed nand from ~(ptr & val) to ~ptr & val; warn about it.
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    BuiltinIndex = 10;
    break;

  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    BuiltinIndex = 11;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    BuiltinIndex = 12;
    NumFixed = 2;
    break;

  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    BuiltinIndex = 13;
    NumFixed = 2;
    ResultType = Context.BoolTy;
    break;

  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    BuiltinIndex = 14;
    break;

  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
    BuiltinIndex = 15;
    NumFixed = 0;
    ResultType = Context.VoidTy;
    break;

  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    BuiltinIndex = 16;
    break;
  }

  // Now that we know how many fixed arguments we expect, first check that we
  // have at least that many.
  if (TheCall->getNumArgs() < 1+NumFixed) {
    Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 + NumFixed << TheCall->getNumArgs()
        << Callee->getSourceRange();
    return ExprError();
  }

  // __sync builtins always imply a sequentially-consistent barrier.
  Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
      << Callee->getSourceRange();

  if (WarnAboutSemanticsChange) {
    Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
        << Callee->getSourceRange();
  }

  // Get the decl for the concrete builtin from this, we can tell what the
  // concrete integer type we should convert to is.
  unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
  const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
  FunctionDecl *NewBuiltinDecl;
  if (NewBuiltinID == BuiltinID)
    NewBuiltinDecl = FDecl;
  else {
    // Perform builtin lookup to avoid redeclaring it.
    DeclarationName DN(&Context.Idents.get(NewBuiltinName));
    LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
    LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true);
    assert(Res.getFoundDecl());
    NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl());
    if (!NewBuiltinDecl)
      return ExprError();
  }

  // The first argument --- the pointer --- has a fixed type; we
  // deduce the types of the rest of the arguments accordingly.  Walk
  // the remaining arguments, converting them to the deduced value type.
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType.  This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type.  Check
    // to see if there is a potentially weird extension going on here.  This can
    // happen when you do an atomic operation on something like a char* and
    // pass in 42.  The 42 gets converted to char.  This is even more strange
    // for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr *NewDRE = DeclRefExpr::Create(
      Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
      /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
      DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  // Prohibit problematic uses of bit-precise integer types with atomic
  // builtins. The arguments would have already been converted to the first
  // argument's type, so only need to check the first argument.
  // Reject _BitInt operands whose bit width is not a power of two; only
  // power-of-two sizes are accepted for the atomic builtins.
  const auto *BitIntValType = ValType->getAs<BitIntType>();
  if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
    Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
    return ExprError();
  }

  return TheCallResult;
}

/// SemaBuiltinNontemporalOverloaded - We have a call to
/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
/// overloaded function based on the pointer type of its last argument.
///
/// This function goes through and does final semantic checking for these
/// builtins. It resolves the element type from the pointer argument, checks
/// the value operand (for stores), and sets the call's result type.
ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
  unsigned BuiltinID = FDecl->getBuiltinID();
  assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
          BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
         "Unexpected nontemporal load/store builtin!");
  // Stores take (value, pointer); loads take just (pointer).
  bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
  unsigned numArgs = isStore ? 2 : 1;

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, numArgs))
    return ExprError();

  // Inspect the last argument of the nontemporal builtin.  This should always
  // be a pointer type, from which we imply the type of the memory access.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  Expr *PointerArg = TheCall->getArg(numArgs - 1);
  ExprResult PointerArgResult =
      DefaultFunctionArrayLvalueConversion(PointerArg);

  if (PointerArgResult.isInvalid())
    return ExprError();
  PointerArg = PointerArgResult.get();
  TheCall->setArg(numArgs - 1, PointerArg);

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();
  // Only integer, pointer, block-pointer, floating-point, and vector element
  // types are accepted.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
      !ValType->isVectorType()) {
    Diag(DRE->getBeginLoc(),
         diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return ExprError();
  }

  if (!isStore) {
    // A load produces the pointee type.
    TheCall->setType(ValType);
    return TheCallResult;
  }

  // For a store, convert the value operand to the pointee type as if it were
  // being passed to a parameter of that type.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return ExprError();

  TheCall->setArg(0, ValArg.get());
  TheCall->setType(Context.VoidTy);
  return TheCallResult;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
6693 bool Sema::CheckObjCString(Expr *Arg) { 6694 Arg = Arg->IgnoreParenCasts(); 6695 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6696 6697 if (!Literal || !Literal->isAscii()) { 6698 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6699 << Arg->getSourceRange(); 6700 return true; 6701 } 6702 6703 if (Literal->containsNonAsciiOrNull()) { 6704 StringRef String = Literal->getString(); 6705 unsigned NumBytes = String.size(); 6706 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6707 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6708 llvm::UTF16 *ToPtr = &ToBuf[0]; 6709 6710 llvm::ConversionResult Result = 6711 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6712 ToPtr + NumBytes, llvm::strictConversion); 6713 // Check for conversion failure. 6714 if (Result != llvm::conversionOK) 6715 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6716 << Arg->getSourceRange(); 6717 } 6718 return false; 6719 } 6720 6721 /// CheckObjCString - Checks that the format string argument to the os_log() 6722 /// and os_trace() functions is correct, and converts it to const char *. 
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // An ObjC string literal (@"...") is also accepted; use its underlying
    // StringLiteral.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  // The format must be an ordinary or UTF-8 string literal.
  if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal to 'const char *' as if passing it to such a
  // parameter.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // Determine the calling convention of the enclosing function; default to
    // CC_C when there is no current function declaration.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // __builtin_ms_va_start is only supported on x86-64 and AArch64 targets.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}

/// Check that va_start appears inside a variadic function/block/method.
/// On success, optionally returns the last named parameter via \p LastParam
/// (null when the parameter list is empty). Returns true after emitting a
/// diagnostic on failure.
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}

/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity.  Emit an error and return true on failure; return false
/// on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  // Reject uses in the wrong target/calling-convention combination.
  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Type->isPromotableIntegerType())
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    unsigned Reason = 0;
    if (Type->isReferenceType())  Reason = 1;
    else if (IsCRegister)    Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  TheCall->setType(Context.VoidTy);
  return false;
}

/// Check a call to the ARM/Microsoft '__va_start' builtin, which takes the
/// va_list plus explicit address/size arguments describing the last named
/// parameter.
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  // In C++, argument 1 must (canonically) point to plain 'char'; C is more
  // permissive to allow aliasing through 'char *'.
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
    return true;
  };

  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs();

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  // Argument 1 must be a pointer to char (see lambda above).
  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  // Argument 2 must have type size_t.
  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends.  This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends.  This is declared to take (...), so we have
/// to check everything.  We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int.  Try all of those.
  for (unsigned i = 0; i < NumArgs - 1; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(NumArgs - 1, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  // The real and imaginary operands must have the same type.
  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
        << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
        << "half";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
7094 // Example builtins are : 7095 // vector double vec_xxpermdi(vector double, vector double, int); 7096 // vector short vec_xxsldwi(vector short, vector short, int); 7097 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7098 unsigned ExpectedNumArgs = 3; 7099 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7100 return true; 7101 7102 // Check the third argument is a compile time constant 7103 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7104 return Diag(TheCall->getBeginLoc(), 7105 diag::err_vsx_builtin_nonconstant_argument) 7106 << 3 /* argument index */ << TheCall->getDirectCallee() 7107 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7108 TheCall->getArg(2)->getEndLoc()); 7109 7110 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7111 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7112 7113 // Check the type of argument 1 and argument 2 are vectors. 7114 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7115 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7116 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7117 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7118 << TheCall->getDirectCallee() 7119 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7120 TheCall->getArg(1)->getEndLoc()); 7121 } 7122 7123 // Check the first two arguments are the same type. 7124 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7125 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7126 << TheCall->getDirectCallee() 7127 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7128 TheCall->getArg(1)->getEndLoc()); 7129 } 7130 7131 // When default clang type checking is turned off and the customized type 7132 // checking is used, the returning type of the function must be explicitly 7133 // set. Otherwise it is _Bool by default. 7134 TheCall->setType(Arg1Ty); 7135 7136 return false; 7137 } 7138 7139 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask.  If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result has as many elements as there are mask indices; build a
      // vector type of that width with the LHS element type.
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  // Each mask index must be a constant in [0, 2 * numElements), or -1.
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    Optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  // Transfer the arguments into a ShuffleVectorExpr, nulling them out of the
  // original call.
  SmallVector<Expr*, 32> exprs;

  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}

/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_PRValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  // Both source and destination must be vector types (or dependent).
  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector_type));

  // Element counts must match.
  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}

/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
7260 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7261 unsigned NumArgs = TheCall->getNumArgs(); 7262 7263 if (NumArgs > 3) 7264 return Diag(TheCall->getEndLoc(), 7265 diag::err_typecheck_call_too_many_args_at_most) 7266 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7267 7268 // Argument 0 is checked for us and the remaining arguments must be 7269 // constant integers. 7270 for (unsigned i = 1; i != NumArgs; ++i) 7271 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7272 return true; 7273 7274 return false; 7275 } 7276 7277 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7278 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7279 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7280 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7281 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7282 if (checkArgCount(*this, TheCall, 1)) 7283 return true; 7284 Expr *Arg = TheCall->getArg(0); 7285 if (Arg->isInstantiationDependent()) 7286 return false; 7287 7288 QualType ArgTy = Arg->getType(); 7289 if (!ArgTy->hasFloatingRepresentation()) 7290 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7291 << ArgTy; 7292 if (Arg->isLValue()) { 7293 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7294 TheCall->setArg(0, FirstArg.get()); 7295 } 7296 TheCall->setType(TheCall->getArg(0)->getType()); 7297 return false; 7298 } 7299 7300 /// SemaBuiltinAssume - Handle __assume (MS Extension). 7301 // __assume does not evaluate its arguments, and should warn if its argument 7302 // has side effects. 
7303 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7304 Expr *Arg = TheCall->getArg(0); 7305 if (Arg->isInstantiationDependent()) return false; 7306 7307 if (Arg->HasSideEffects(Context)) 7308 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7309 << Arg->getSourceRange() 7310 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7311 7312 return false; 7313 } 7314 7315 /// Handle __builtin_alloca_with_align. This is declared 7316 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 7317 /// than 8. 7318 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7319 // The alignment must be a constant integer. 7320 Expr *Arg = TheCall->getArg(1); 7321 7322 // We can't check the value of a dependent argument. 7323 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7324 if (const auto *UE = 7325 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7326 if (UE->getKind() == UETT_AlignOf || 7327 UE->getKind() == UETT_PreferredAlignOf) 7328 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7329 << Arg->getSourceRange(); 7330 7331 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7332 7333 if (!Result.isPowerOf2()) 7334 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7335 << Arg->getSourceRange(); 7336 7337 if (Result < Context.getCharWidth()) 7338 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7339 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7340 7341 if (Result > std::numeric_limits<int32_t>::max()) 7342 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7343 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7344 } 7345 7346 return false; 7347 } 7348 7349 /// Handle __builtin_assume_aligned. This is declared 7350 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs > 3)
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();

  // The alignment must be a constant integer.
  Expr *Arg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << Arg->getSourceRange();

    // Alignments beyond the maximum are only warned about, not rejected.
    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << Arg->getSourceRange() << Sema::MaximumAlignment;
  }

  // The optional third argument (the offset) is converted to size_t.
  if (NumArgs > 2) {
    ExprResult Arg(TheCall->getArg(2));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
        Context.getSizeType(), false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());
  }

  return false;
}

/// Check a call to __builtin_os_log_format() or
/// __builtin_os_log_format_buffer_size(): validates the buffer (format call
/// only), format string, and variadic data arguments, then sets the call's
/// result type.
bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  // The size call takes just the format string; the format call also takes
  // the destination buffer.
  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  // At most 0xff data arguments are allowed.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    // Each data argument must fit in the one-byte size field of the buffer
    // layout.
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  // The size call yields a size_t; the format call yields the buffer pointer.
  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.  On success, the value is returned via
/// \p Result; dependent arguments are accepted without a value.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  Optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of
/// CallExpr TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
7553 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7554 return true; 7555 7556 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7557 // and only if x is a power of 2. 7558 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7559 return false; 7560 7561 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7562 << Arg->getSourceRange(); 7563 } 7564 7565 static bool IsShiftedByte(llvm::APSInt Value) { 7566 if (Value.isNegative()) 7567 return false; 7568 7569 // Check if it's a shifted byte, by shifting it down 7570 while (true) { 7571 // If the value fits in the bottom byte, the check passes. 7572 if (Value < 0x100) 7573 return true; 7574 7575 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7576 // fails. 7577 if ((Value & 0xFF) != 0) 7578 return false; 7579 7580 // If the bottom 8 bits are all 0, but something above that is nonzero, 7581 // then shifting the value right by 8 bits won't affect whether it's a 7582 // shifted byte or not. So do that, and go round again. 7583 Value >>= 8; 7584 } 7585 } 7586 7587 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7588 /// a constant expression representing an arbitrary byte value shifted left by 7589 /// a multiple of 8 bits. 7590 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7591 unsigned ArgBits) { 7592 llvm::APSInt Result; 7593 7594 // We can't check the value of a dependent argument. 7595 Expr *Arg = TheCall->getArg(ArgNum); 7596 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7597 return false; 7598 7599 // Check constant-ness first. 7600 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7601 return true; 7602 7603 // Truncate to the given size. 
7604 Result = Result.getLoBits(ArgBits); 7605 Result.setIsUnsigned(true); 7606 7607 if (IsShiftedByte(Result)) 7608 return false; 7609 7610 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7611 << Arg->getSourceRange(); 7612 } 7613 7614 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7615 /// TheCall is a constant expression representing either a shifted byte value, 7616 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7617 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7618 /// Arm MVE intrinsics. 7619 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7620 int ArgNum, 7621 unsigned ArgBits) { 7622 llvm::APSInt Result; 7623 7624 // We can't check the value of a dependent argument. 7625 Expr *Arg = TheCall->getArg(ArgNum); 7626 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7627 return false; 7628 7629 // Check constant-ness first. 7630 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7631 return true; 7632 7633 // Truncate to the given size. 7634 Result = Result.getLoBits(ArgBits); 7635 Result.setIsUnsigned(true); 7636 7637 // Check to see if it's in either of the required forms. 
/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
///
/// Validates the operands of the AArch64 MTE builtins (irg, addg, gmi,
/// ldg/stg, subp): performs the usual argument conversions, checks
/// pointer/integer operand kinds, and derives the call's result type from its
/// operands where required. Returns true after emitting a diagnostic on
/// failure, false on success.
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg(ptr, mask): first arg any pointer, second arg integer;
  // result has the same type as the pointer argument.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
               << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg(ptr, imm): pointer first arg; second arg must be an
  // integer constant in [0, 15]. Result has the pointer argument's type.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi(ptr, mask): pointer first arg, integer second arg;
  // result is int. Note: the second argument is not converted here.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
               << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg / __builtin_arm_stg(ptr): single pointer argument.
  // Only ldg produces a value (of the pointer argument's type).
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp(a, b): each operand must be a pointer or a null
  // pointer constant, at least one must be a pointer, pointee types must be
  // compatible, and the result is long long.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
               << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
               << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      // Compare canonical, unqualified pointee types so qualifiers and sugar
      // do not cause a spurious mismatch.
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
                 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
                 << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}
/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
///
/// \param ExpectedFieldNum the number of colon-separated fields an encoded
///        register specification must contain.
/// \param AllowName whether a plain single-field register name is accepted.
/// Returns true after emitting a diagnostic on an invalid argument, false
/// otherwise.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // 32-bit ARM encodings carry "cp"/"p" and "c" prefixes on the
      // coprocessor and CRn/CRm fields; strip them (case-insensitively)
      // before validating the numeric part below.
      ValidString &= Fields[0].startswith_insensitive("cp") ||
                     Fields[0].startswith_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].startswith_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Per-field upper bounds (all fields are also required to be >= 0).
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // If the register name is one of those that appear in the condition below
    // and the special register builtin being used is one of the write builtins,
    // then we require that the argument provided for writing to the register
    // is an integer constant expression. This is because it will be lowered to
    // an MSR (immediate) instruction, so we need to know the immediate at
    // compile time.
    if (TheCall->getNumArgs() != 2)
      return false;

    std::string RegLower = Reg.lower();
    if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
        RegLower != "pan" && RegLower != "uao")
      return false;

    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  return false;
}
/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  switch (BuiltinID) {
  default:
    // This function is called in CheckPPCBuiltinFunctionCall where the
    // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here
    // we are isolating the pair vector memop builtins that can be used with mma
    // off so the default case is every builtin that requires mma and paired
    // vector memops.
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10") ||
        SemaFeatureCheck(*this, TheCall, "mma",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  case PPC::BI__builtin_vsx_lxvp:
  case PPC::BI__builtin_vsx_stxvp:
  case PPC::BI__builtin_vsx_assemble_pair:
  case PPC::BI__builtin_vsx_disassemble_pair:
    // These pair vector memops only need "paired-vector-memops", not "mma".
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  }

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  // Walk the remaining descriptors in TypeStr in parallel with the call's
  // arguments, checking each argument against its expected type.
  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    // Ran out of arguments before descriptors: fall through to the counting
    // loop below so checkArgCount reports the expected total.
    if (ArgNum >= TheCall->getNumArgs()) {
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}
7996 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7997 if (!Context.getTargetInfo().hasSjLjLowering()) 7998 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7999 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8000 8001 Expr *Arg = TheCall->getArg(1); 8002 llvm::APSInt Result; 8003 8004 // TODO: This is less than ideal. Overload this to take a value. 8005 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 8006 return true; 8007 8008 if (Result != 1) 8009 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 8010 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 8011 8012 return false; 8013 } 8014 8015 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 8016 /// This checks that the target supports __builtin_setjmp. 8017 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 8018 if (!Context.getTargetInfo().hasSjLjLowering()) 8019 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 8020 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 8021 return false; 8022 } 8023 8024 namespace { 8025 8026 class UncoveredArgHandler { 8027 enum { Unknown = -1, AllCovered = -2 }; 8028 8029 signed FirstUncoveredArg = Unknown; 8030 SmallVector<const Expr *, 4> DiagnosticExprs; 8031 8032 public: 8033 UncoveredArgHandler() = default; 8034 8035 bool hasUncoveredArg() const { 8036 return (FirstUncoveredArg >= 0); 8037 } 8038 8039 unsigned getUncoveredArg() const { 8040 assert(hasUncoveredArg() && "no uncovered argument"); 8041 return FirstUncoveredArg; 8042 } 8043 8044 void setAllCovered() { 8045 // A string has been found with all arguments covered, so clear out 8046 // the diagnostics. 8047 DiagnosticExprs.clear(); 8048 FirstUncoveredArg = AllCovered; 8049 } 8050 8051 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 8052 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 8053 8054 // Don't update if a previous string covers all arguments. 
8055 if (FirstUncoveredArg == AllCovered) 8056 return; 8057 8058 // UncoveredArgHandler tracks the highest uncovered argument index 8059 // and with it all the strings that match this index. 8060 if (NewFirstUncoveredArg == FirstUncoveredArg) 8061 DiagnosticExprs.push_back(StrExpr); 8062 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 8063 DiagnosticExprs.clear(); 8064 DiagnosticExprs.push_back(StrExpr); 8065 FirstUncoveredArg = NewFirstUncoveredArg; 8066 } 8067 } 8068 8069 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 8070 }; 8071 8072 enum StringLiteralCheckType { 8073 SLCT_NotALiteral, 8074 SLCT_UncheckedLiteral, 8075 SLCT_CheckedLiteral 8076 }; 8077 8078 } // namespace 8079 8080 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 8081 BinaryOperatorKind BinOpKind, 8082 bool AddendIsRight) { 8083 unsigned BitWidth = Offset.getBitWidth(); 8084 unsigned AddendBitWidth = Addend.getBitWidth(); 8085 // There might be negative interim results. 8086 if (Addend.isUnsigned()) { 8087 Addend = Addend.zext(++AddendBitWidth); 8088 Addend.setIsSigned(true); 8089 } 8090 // Adjust the bit width of the APSInts. 8091 if (AddendBitWidth > BitWidth) { 8092 Offset = Offset.sext(AddendBitWidth); 8093 BitWidth = AddendBitWidth; 8094 } else if (BitWidth > AddendBitWidth) { 8095 Addend = Addend.sext(BitWidth); 8096 } 8097 8098 bool Ov = false; 8099 llvm::APSInt ResOffset = Offset; 8100 if (BinOpKind == BO_Add) 8101 ResOffset = Offset.sadd_ov(Addend, Ov); 8102 else { 8103 assert(AddendIsRight && BinOpKind == BO_Sub && 8104 "operator must be add or sub with addend on the right"); 8105 ResOffset = Offset.ssub_ov(Addend, Ov); 8106 } 8107 8108 // We add an offset to a pointer here so we should support an offset as big as 8109 // possible. 
namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  // Character offset (not bytes) into FExpr at which the format string
  // effectively starts.
  int64_t Offset;

public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  // The literal's text with the leading Offset characters removed.
  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  // Byte length of the remaining text; scales the character offset by the
  // literal's per-character byte width.
  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isAscii(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Translate a byte position in the offsetted view back to a source
  // location within the underlying literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg,
                              bool IgnoreStringsWithoutSpecifiers);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
//
// The function walks the expression recursively (via the tryAgain label and
// self-recursion), threading an APSInt `Offset` that accumulates any constant
// pointer arithmetic applied to the string, so that e.g. `fmt + 2` or
// `&fmt[2]` is checked starting at character 2.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      bool HasVAListArg, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg,
                      llvm::APSInt Offset,
                      bool IgnoreStringsWithoutSpecifiers = false) {
  if (S.isConstantEvaluated())
    return SLCT_NotALiteral;
tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent.  Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
                                                 S.isConstantEvaluated())) {
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
                                   HasVAListArg, format_idx, firstDataArg,
                                   Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right = checkFormatStringExpr(
        S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg,
        Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
        IgnoreStringsWithoutSpecifiers);

    // Combine by taking the weaker classification (enumerators are ordered
    // weakest-first).
    return (CheckLeft && Left < Right) ? Left : Right;
  }

  case Stmt::ImplicitCastExprClass:
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto tryAgain;

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
      E = src;
      goto tryAgain;
    }
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    // liability.
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);
      }

      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          // Check the initializer in place of the variable; notes rather
          // than warnings are attached since this is not the call site.
          return checkFormatStringExpr(S, Init, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       /*InFunctionCall*/ false, CheckedVarArgs,
                                       UncoveredArg, Offset);
        }
      }

      // For vprintf* functions (i.e., HasVAListArg==true), we add a
      // special check to see if the format string is a function parameter
      // of the function calling the printf function. If the function
      // has an attribute indicating it is a printf-like function, then we
      // should suppress warnings concerning non-literals being used in a call
      // to a vprintf function. For example:
      //
      // void
      // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
      //   va_list ap;
      //   va_start(ap, fmt);
      //   vprintf(fmt, ap);  // Do NOT emit a warning about "fmt".
      //   ...
      // }
      if (HasVAListArg) {
        if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
          if (const Decl *D = dyn_cast<Decl>(PV->getDeclContext())) {
            int PVIndex = PV->getFunctionScopeIndex() + 1;
            for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
              // adjust for implicit parameter
              if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D))
                if (MD->isInstance())
                  ++PVIndex;
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PVIndex == PVFormat->getFormatIdx() &&
                  Type == S.GetFormatStringType(PVFormat))
                return SLCT_UncheckedLiteral;
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      // If the callee carries format_arg attributes, check the argument(s)
      // it designates as the format string source.
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        // The CF/NS constant-string builtins pass their first argument
        // through unchanged; check it directly.
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(S, Arg, Args,
                                       HasVAListArg, format_idx,
                                       firstDataArg, Type, CallType,
                                       InFunctionCall, CheckedVarArgs,
                                       UncoveredArg, Offset,
                                       IgnoreStringsWithoutSpecifiers);
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *MD = ME->getMethodDecl()) {
      if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
        // As a special case heuristic, if we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that lack
        // format specifiers. The idea is that if the key doesn't have any
        // format specifiers then its probably just a key to map to the
        // localized strings. If it does have format specifiers though, then its
        // likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
            CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      // Reject offsets that land outside the literal before checking.
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
                        firstDataArg, Type, InFunctionCall, CallType,
                        CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());

      // Exactly one side is a constant integer: fold it into Offset and
      // keep walking the other side. (int - ptr is not a pointer, so the
      // LHS-constant case only applies to addition.)
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    // &str[i] is equivalent to str + i; fold the constant index into Offset.
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluated())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}
    // (Tail of checkFormatStringExpr, Stmt::BinaryOperatorClass case.)
    // A string literal + a compile-time-constant int offset is still treated
    // as a string literal; the constant side is folded into Offset and the
    // other operand is re-examined via the tryAgain loop.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());

      // Only handled when exactly one side is a constant integer.
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          // "1 + fmt" is an offset only for addition; "1 - fmt" is not a
          // string-plus-offset pattern.
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          // "fmt + 1" or "fmt - 1": fold the RHS constant into Offset.
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    // "&fmt[i]" with a constant index is equivalent to "fmt + i".
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluated())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}

/// Map the spelling used in a format attribute (e.g.
/// __attribute__((format(printf, ...)))) to the corresponding
/// FormatStringType. Unrecognized spellings map to FST_Unknown.
/// Note: "os_trace" deliberately maps to FST_OSLog.
Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
      .Case("scanf", FST_Scanf)
      .Cases("printf", "printf0", FST_Printf)
      .Cases("NSString", "CFString", FST_NSString)
      .Case("strftime", FST_Strftime)
      .Case("strfmon", FST_Strfmon)
      .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
      .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
      .Case("os_trace", FST_OSLog)
      .Case("os_log", FST_OSLog)
      .Default(FST_Unknown);
}
/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args,
                                bool IsCXXMember,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // Decode the attribute into argument indices (getFormatStringInfo adjusts
  // for the implicit 'this' parameter when IsCXXMember) and delegate to the
  // main overload below.
  FormatStringInfo FSI;
  if (getFormatStringInfo(Format, IsCXXMember, &FSI))
    return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
  return false;
}

bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                bool HasVAListArg, unsigned format_idx,
                                unsigned firstDataArg, FormatStringType Type,
                                VariadicCallType CallType,
                                SourceLocation Loc, SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time. Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT =
      checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
                            format_idx, firstDataArg, Type, CallType,
                            /*IsFunctionCall*/ true, CheckedVarArgs,
                            UncoveredArg,
                            /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    // For printf-family and NSString-family formats, also offer a fix-it
    // that demotes the non-literal string to a data argument of a
    // "%s" / "%@" format.
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}

namespace {

// Common base for the printf/scanf format-string checkers. Tracks which data
// arguments have been matched against format specifiers (CoveredArgs) and
// centralizes diagnostic emission via EmitFormatDiagnostic.
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg; // Index into Args of the first data argument.
  const unsigned NumDataArgs;  // Number of data arguments following it.
  const char *Beg; // Start of format string.
  const bool HasVAListArg; // True for v*printf-style callees (no data args).
  ArrayRef<const Expr *> Args;
  unsigned FormatIdx; // Index into Args of the format string expression.
  llvm::SmallBitVector CoveredArgs; // Bit i set => data arg i was referenced.
  bool usesPositionalArgs = false;
  bool atFirstArg = true;
  bool inFunctionCall;
  Sema::VariadicCallType CallType;
  llvm::SmallBitVector &CheckedVarArgs;
  UncoveredArgHandler &UncoveredArg;

public:
  CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, const char *beg, bool hasVAListArg,
                     ArrayRef<const Expr *> Args, unsigned formatIdx,
                     bool inFunctionCall, Sema::VariadicCallType callType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
        FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
        HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
        inFunctionCall(inFunctionCall), CallType(callType),
        CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
    // Start with no data arguments covered by any specifier.
    CoveredArgs.resize(numDataArgs);
    CoveredArgs.reset();
  }

  void DoneProcessing();

  void HandleIncompleteSpecifier(const char *startSpecifier,
                                 unsigned specifierLen) override;

  void HandleInvalidLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen,
      unsigned DiagID);

  void HandleNonStandardLengthModifier(
      const analyze_format_string::FormatSpecifier &FS,
      const char *startSpecifier, unsigned specifierLen);

  void HandleNonStandardConversionSpecifier(
      const analyze_format_string::ConversionSpecifier &CS,
      const char *startSpecifier, unsigned specifierLen);

  void HandlePosition(const char *startPos, unsigned posLen) override;

  void
  HandleInvalidPosition(const char *startSpecifier,
                        unsigned specifierLen,
                        analyze_format_string::PositionContext p) override;

  void HandleZeroPosition(const char *startPos, unsigned posLen) override;

  void HandleNullChar(const char *nullCharacter) override;

  // Static variant used when the caller has the argument expression at hand
  // (e.g. UncoveredArgHandler::Diagnose); the non-static overload below
  // forwards to this one with the handler's own state.
  template <typename Range>
  static void
  EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
                       const PartialDiagnostic &PDiag, SourceLocation StringLoc,
                       bool IsStringLocation, Range StringRange,
                       ArrayRef<FixItHint> Fixit = None);

protected:
  bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
                                        const char *startSpec,
                                        unsigned specifierLen,
                                        const char *csStart, unsigned csLen);

  void HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                         const char *startSpec,
                                         unsigned specifierLen);

  SourceRange getFormatStringRange();
  CharSourceRange getSpecifierRange(const char *startSpecifier,
                                    unsigned specifierLen);
  SourceLocation getLocationOfByte(const char *x);

  const Expr *getDataArg(unsigned i) const;

  bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
                    const analyze_format_string::ConversionSpecifier &CS,
                    const char *startSpecifier, unsigned specifierLen,
                    unsigned argIndex);

  template <typename Range>
  void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
                            bool IsStringLocation, Range StringRange,
                            ArrayRef<FixItHint> Fixit = None);
};

} // namespace

SourceRange CheckFormatHandler::getFormatStringRange() {
  return OrigFormatExpr->getSourceRange();
}

// Returns the char-granular source range covering a specifier that starts at
// byte 'startSpecifier' (a pointer into the format string buffer Beg).
CharSourceRange CheckFormatHandler::
getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
  SourceLocation Start = getLocationOfByte(startSpecifier);
  SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);

  // Advance the end SourceLocation by one due to half-open ranges.
  End = End.getLocWithOffset(1);

  return CharSourceRange::getCharRange(Start, End);
}

// Translate a pointer into the format-string buffer back to a location in
// the original source (handles escapes/concatenated literals via FExpr).
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}

void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
                                                   unsigned specifierLen){
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
                       getLocationOfByte(startSpecifier),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen));
}

// Diagnose a length modifier that is invalid for its conversion specifier
// (e.g. "%hs"); emits DiagID plus, when a correction is known, a note with a
// fix-it replacing the modifier.
void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    // No known correction; for a nonsensical modifier offer plain removal.
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}

// Diagnose a length modifier that is accepted but not part of the standard
// (warn_format_non_standard with selector 0 = "length modifier").
void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // A standard replacement is known: suggest it with a fix-it.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

// Diagnose a conversion specifier that is accepted but not standard
// (warn_format_non_standard with selector 1 = "conversion specifier").
void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  // Positional arguments ("%1$d") are a POSIX extension, not ISO C.
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void
CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
                                     analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
                         << (unsigned) p,
                       getLocationOfByte(startPos), /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  // Positional indices are 1-based; "%0$d" is invalid.
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
        S.PDiag(diag::warn_printf_format_string_contains_null_char),
        getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
        getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

// Emit the deferred -Wformat "data argument not used" warning collected by
// DoneProcessing, pointing at the first unused argument expression.
void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  // Suppress the warning for arguments that come from system macros.
  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
                                  S, IsFunctionCall, DiagnosticExprs[0],
                                  PDiag, Loc, /*IsStringLocation*/false,
                                  DiagnosticExprs[0]->getSourceRange());
}

// Diagnose an unknown conversion character. Returns false when processing of
// the rest of the format string should stop.
bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    // Not valid UTF-8: fall back to showing the raw first byte.
    if (Result != llvm::conversionOK) {
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}

void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
    S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
    Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
}

// Verify that a specifier's argument index refers to a supplied data
// argument; warns and returns false when there are not enough arguments.
bool
CheckFormatHandler::CheckNumArgs(
  const analyze_format_string::FormatSpecifier &FS,
  const analyze_format_string::ConversionSpecifier &CS,
  const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {

  if (argIndex >= NumDataArgs) {
    PartialDiagnostic PDiag = FS.usesPositionalArg()
      ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
           << (argIndex+1) << NumDataArgs)
      : S.PDiag(diag::warn_printf_insufficient_data_args);
    EmitFormatDiagnostic(
      PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
      getSpecifierRange(startSpecifier, specifierLen));

    // The format string references more arguments than were supplied; treat
    // all supplied arguments as covered so we don't also emit a cascading
    // "data argument not used" warning.
    UncoveredArg.setAllCovered();
    return false;
  }
  return true;
}

// Non-static overload: forwards to the static EmitFormatDiagnostic using the
// handler's own call-site state and format argument expression.
template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}

/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message. This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic. If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string should be
/// used for the note. Otherwise, Loc points to the argument list and will
/// be used with PDiag.
///
/// \param StringRange some or all of the string to highlight. This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template <typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(
    Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
    const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
    Range StringRange, ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    // Single diagnostic at the call site.
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    // Diagnostic at the argument, plus a note pointing at where the format
    // string itself was defined.
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();

    const Sema::SemaDiagnosticBuilder &Note =
      S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
             diag::note_format_string_defined);

    Note << StringRange;
    Note << FixIt;
  }
}

//===--- CHECK: Printf format string checking ------------------------------===//

namespace {

// Handler for printf-family format strings (printf, NSString, os_log, ...);
// extends CheckFormatHandler with printf-specific specifier checks.
class CheckPrintfHandler : public CheckFormatHandler {
public:
  CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
                     const Expr *origFormatExpr,
                     const Sema::FormatStringType type, unsigned firstDataArg,
                     unsigned numDataArgs, bool isObjC, const char *beg,
                     bool hasVAListArg, ArrayRef<const Expr *> Args,
                     unsigned formatIdx, bool inFunctionCall,
                     Sema::VariadicCallType CallType,
                     llvm::SmallBitVector &CheckedVarArgs,
                     UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool isObjCContext() const { return FSType == Sema::FST_NSString; }

  /// Returns true if '%@' specifiers are allowed in the format string.
  bool allowsObjCArg() const {
    return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
           FSType == Sema::FST_OSTrace;
  }

  bool HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) override;

  void handleInvalidMaskType(StringRef MaskType) override;

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *startSpecifier, unsigned specifierLen,
                             const TargetInfo &Target) override;
  bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                       const char *StartSpecifier,
                       unsigned SpecifierLen,
                       const Expr *E);

  bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
                    const char *startSpecifier, unsigned specifierLen);
  void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
                           const analyze_printf::OptionalAmount &Amt,
                           unsigned type,
                           const char *startSpecifier, unsigned specifierLen);
  void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                  const analyze_printf::OptionalFlag &flag,
                  const char *startSpecifier, unsigned specifierLen);
  void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
                         const analyze_printf::OptionalFlag &ignoredFlag,
                         const analyze_printf::OptionalFlag &flag,
                         const char *startSpecifier, unsigned specifierLen);
  bool checkForCStrMembers(const analyze_printf::ArgType &AT,
                           const Expr *E);

  void HandleEmptyObjCModifierFlag(const char *startFlag,
                                   unsigned flagLen) override;

  void HandleInvalidObjCModifierFlag(const char *startFlag,
                                     unsigned flagLen) override;

  void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
                                            const char *flagsEnd,
                                            const char *conversionPosition)
                                            override;
};

} // namespace

// Delegate to the shared invalid-conversion handling in the base class,
// pointing at the conversion character of this printf specifier.
bool
CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}

// Check a '*' field width or precision ('k' selects which, for diagnostics)
// that consumes a data argument: the argument must exist and be (unsigned)
// int. Returns false when checking should stop.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt,
    unsigned k, const char *startSpecifier,
    unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (!HasVAListArg) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                               << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      if (!Arg)
        return false;

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                               << k << AT.getRepresentativeTypeName(S.Context)
                               << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}

// Diagnose a width/precision ('type' selects which) that makes no sense for
// this conversion specifier; a constant amount gets a removal fix-it.
void CheckPrintfHandler::HandleInvalidAmount(
    const analyze_printf::PrintfSpecifier &FS,
    const analyze_printf::OptionalAmount &Amt,
    unsigned type,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  FixItHint fixit =
    Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
      ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength()))
      : FixItHint();

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                         << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       fixit);
}

void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                                    const analyze_printf::OptionalFlag &flag,
                                    const char *startSpecifier,
                                    unsigned specifierLen) {
  // Warn about pointless flag with a fixit removal.
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
                         << flag.toString() << CS.toString(),
                       getLocationOfByte(flag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(flag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleIgnoredFlag(
    const analyze_printf::PrintfSpecifier &FS,
    const analyze_printf::OptionalFlag &ignoredFlag,
    const analyze_printf::OptionalFlag &flag,
    const char *startSpecifier,
    unsigned specifierLen) {
  // Warn about ignored flag with a fixit removal.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
                         << ignoredFlag.toString() << flag.toString(),
                       getLocationOfByte(ignoredFlag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(ignoredFlag.getPosition(), 1)));
}

// ObjC modifier-flag diagnostics for the "%[...]" extension.
void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
                                                     unsigned flagLen) {
  // Warn about an empty flag.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startFlag, flagLen));
}

void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
                                                       unsigned flagLen) {
  // Warn about an invalid flag.
  auto Range = getSpecifierRange(startFlag, flagLen);
  StringRef flag(startFlag, flagLen);
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
                      getLocationOfByte(startFlag),
                      /*IsStringLocation*/true,
                      Range, FixItHint::CreateRemoval(Range));
}

// Diagnose '[...]' ObjC modifier flags attached to a conversion that is not
// '@'; the fixit removes the bracketed flags entirely.
void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
    const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
  // Warn about using '[...]' without a '@' conversion.
  auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
  auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
  EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
                      getLocationOfByte(conversionPosition),
                      /*IsStringLocation*/true,
                      Range, FixItHint::CreateRemoval(Range));
}

// Determines if the specified type is a C++ class or struct containing
// a member with the specified name and kind (e.g. a CXXMethodDecl named
// "c_str()").
template<typename MemberKind>
static llvm::SmallPtrSet<MemberKind*, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  llvm::SmallPtrSet<MemberKind*, 1> Results;

  // Non-record types, non-C++ records, and records without a visible
  // definition cannot provide the member; return the empty set.
  if (!RT)
    return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD || !RD->getDefinition())
    return Results;

  LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
                 Sema::LookupMemberName);
  // This lookup is speculative; don't let it emit diagnostics.
  R.suppressDiagnostics();

  // We just need to include all members of the right kind turned up by the
  // filter, at this point.
  if (S.LookupQualifiedName(R, RT->getDecl()))
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
      NamedDecl *decl = (*I)->getUnderlyingDecl();
      if (MemberKind *FK = dyn_cast<MemberKind>(decl))
        Results.insert(FK);
    }
  return Results;
}

/// Check if we could call '.c_str()' on an object.
///
/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
/// allow the call, or if it would be ambiguous).
bool Sema::hasCStrMethod(const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
  // Accept any 'c_str' overload that is callable with no arguments.
  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI)
    if ((*MI)->getMinRequiredArguments() == 0)
      return true;
  return false;
}

// Check if a (w)string was passed when a (w)char* was needed, and offer a
// better diagnostic if so. AT is assumed to be valid.
// Returns true when a c_str() conversion method is found.
bool CheckPrintfHandler::checkForCStrMembers(
    const analyze_printf::ArgType &AT, const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());

  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI) {
    const CXXMethodDecl *Method = *MI;
    // Only suggest 'c_str()' when it is callable with no arguments and its
    // return type actually satisfies the format specifier.
    if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
      // FIXME: Suggest parens if the expression needs them.
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
      S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
          << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
      return true;
    }
  }

  return false;
}

/// Check a single printf conversion specifier against the data argument(s)
/// it consumes.  Returns false to abort checking of the rest of the format
/// string (e.g. on positional/non-positional mixing or a missing argument),
/// true otherwise.
bool CheckPrintfHandler::HandlePrintfSpecifier(
    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen, const TargetInfo &Target) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    // The first argument-consuming specifier decides whether this format
    // string is positional; any later disagreement is an error.
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    // Type check the first argument (int for %b, pointer for %D)
    const Expr *Ex = getDataArg(argIndex);
    const analyze_printf::ArgType &AT =
      (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
        ArgType(S.Context.IntTy) : ArgType::CPointerTy;
    if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  // %n is unsupported on some targets regardless of format-string kind.
  const llvm::Triple &Triple = Target.getTriple();
  if (CS.getKind() == ConversionSpecifier::nArg &&
      (Triple.isAndroid() || Triple.isOSFuchsia())) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
                        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
                        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}

// Returns true when inserting a C-style cast in front of \p E would change
// its parse (so a fixit must also parenthesize the expression).
static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
  const Expr *Inside = E->IgnoreImpCasts();
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
    Inside = POE->getSyntacticForm()->IgnoreImpCasts();

  // These expression kinds bind at least as tightly as a cast, so no extra
  // parentheses are needed; everything else conservatively gets them.
  switch (Inside->getStmtClass()) {
  case Stmt::ArraySubscriptExprClass:
  case Stmt::CallExprClass:
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::DeclRefExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::MemberExprClass:
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCBoolLiteralExprClass:
  case Stmt::ObjCBoxedExprClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCEncodeExprClass:
  case Stmt::ObjCIvarRefExprClass:
  case Stmt::ObjCMessageExprClass:
  case Stmt::ObjCPropertyRefExprClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCSubscriptRefExprClass:
  case Stmt::ParenExprClass:
  case Stmt::StringLiteralClass:
  case Stmt::UnaryOperatorClass:
    return false;
  default:
    return true;
  }
}

// Returns (cast type, typedef name) when \p E's type is one of a small set
// of platform-independence typedefs that should be printed through a cast to
// a fixed-size primitive type, or (null, "") otherwise.
static std::pair<QualType, StringRef>
shouldNotPrintDirectly(const ASTContext &Context,
                       QualType IntendedTy,
                       const Expr *E) {
  // Use a 'while' to peel off layers of typedefs.
  QualType TyTy = IntendedTy;
  while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
    StringRef Name = UserTy->getDecl()->getName();
    QualType CastTy = llvm::StringSwitch<QualType>(Name)
      .Case("CFIndex", Context.getNSIntegerType())
      .Case("NSInteger", Context.getNSIntegerType())
      .Case("NSUInteger", Context.getNSUIntegerType())
      .Case("SInt32", Context.IntTy)
      .Case("UInt32", Context.UnsignedIntTy)
      .Default(QualType());

    if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);

    TyTy = UserTy->desugar();
  }

  // Strip parens if necessary.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there. Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
      shouldNotPrintDirectly(Context,
                             CO->getTrueExpr()->getType(),
                             CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
      shouldNotPrintDirectly(Context,
                             CO->getFalseExpr()->getType(),
                             CO->getFalseExpr());

    // Agreeing arms yield that result; if only one arm produced a
    // suggestion, use it; two different non-null suggestions cancel out.
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}

/// Return true if \p ICE is an implicit argument promotion of an arithmetic
/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
/// type do not count.
static bool
isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
  QualType From = ICE->getSubExpr()->getType();
  QualType To = ICE->getType();
  // It's an integer promotion if the destination type is the promoted
  // source type.
  if (ICE->getCastKind() == CK_IntegralCast &&
      From->isPromotableIntegerType() &&
      S.Context.getPromotedIntegerType(From) == To)
    return true;
  // Look through vector types, since we do default argument promotion for
  // those in OpenCL.
  if (const auto *VecTy = From->getAs<ExtVectorType>())
    From = VecTy->getElementType();
  if (const auto *VecTy = To->getAs<ExtVectorType>())
    To = VecTy->getElementType();
  // It's a floating promotion if the source type is a lower rank.
  return ICE->getCastKind() == CK_FloatingCast &&
         S.Context.getFloatingTypeOrder(From, To) < 0;
}

/// Type check a single data argument against the printf conversion specifier
/// that consumes it, emitting -Wformat diagnostics (with fixits where
/// possible).  Always returns true: a mismatch is diagnosed here but does
/// not stop checking of later specifiers.
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                                    const char *StartSpecifier,
                                    unsigned SpecifierLen,
                                    const Expr *E) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  // Now type check the data expression that matches the
  // format specifier.
  const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
  if (!AT.isValid())
    return true;

  // Peel off 'typeof(expr)' sugar so diagnostics mention the real type.
  QualType ExprTy = E->getType();
  while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
    ExprTy = TET->getUnderlyingExpr()->getType();
  }

  // Diagnose attempts to print a boolean value as a character. Unlike other
  // -Wformat diagnostics, this is fine from a type perspective, but it still
  // doesn't make sense.
  if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
      E->isKnownToHaveBooleanValue()) {
    const CharSourceRange &CSR =
        getSpecifierRange(StartSpecifier, SpecifierLen);
    SmallString<4> FSString;
    llvm::raw_svector_ostream os(FSString);
    FS.toString(os);
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
                             << FSString,
                         E->getExprLoc(), false, CSR);
    return true;
  }

  analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
  if (Match == analyze_printf::ArgType::Match)
    return true;

  // Look through argument promotions for our error message's reported type.
  // This includes the integral and floating promotions, but excludes array
  // and function pointer decay (seeing that an argument intended to be a
  // string has type 'char [6]' is probably more confusing than 'char *') and
  // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (isArithmeticArgumentPromotion(S, ICE)) {
      E = ICE->getSubExpr();
      ExprTy = E->getType();

      // Check if we didn't match because of an implicit cast from a 'char'
      // or 'short' to an 'int'.  This is done because printf is a varargs
      // function.
      if (ICE->getType() == S.Context.IntTy ||
          ICE->getType() == S.Context.UnsignedIntTy) {
        // All further checking is done on the subexpression
        const analyze_printf::ArgType::MatchKind ImplicitMatch =
            AT.matchesType(S.Context, ExprTy);
        if (ImplicitMatch == analyze_printf::ArgType::Match)
          return true;
        if (ImplicitMatch == ArgType::NoMatchPedantic ||
            ImplicitMatch == ArgType::NoMatchTypeConfusion)
          Match = ImplicitMatch;
      }
    }
  } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
    // Special case for 'a', which has type 'int' in C.
    // Note, however, that we do /not/ want to treat multibyte constants like
    // 'MooV' as characters! This form is deprecated but still exists. In
    // addition, don't treat expressions as of type 'char' if one byte length
    // modifier is provided.
    if (ExprTy == S.Context.IntTy &&
        FS.getLengthModifier().getKind() != LengthModifier::AsChar)
      if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
        ExprTy = S.Context.CharTy;
  }

  // Look through enums to their underlying type.
  bool IsEnum = false;
  if (auto EnumTy = ExprTy->getAs<EnumType>()) {
    ExprTy = EnumTy->getDecl()->getIntegerType();
    IsEnum = true;
  }

  // %C in an Objective-C context prints a unichar, not a wchar_t.
  // If the argument is an integer of some kind, believe the %C and suggest
  // a cast instead of changing the conversion specifier.
  QualType IntendedTy = ExprTy;
  if (isObjCContext() &&
      FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
    if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
        !ExprTy->isCharType()) {
      // 'unichar' is defined as a typedef of unsigned short, but we should
      // prefer using the typedef if it is visible.
      IntendedTy = S.Context.UnsignedShortTy;

      // While we are here, check if the value is an IntegerLiteral that happens
      // to be within the valid range.
      if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
        const llvm::APInt &V = IL->getValue();
        if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
          return true;
      }

      LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
                          Sema::LookupOrdinaryName);
      if (S.LookupName(Result, S.getCurScope())) {
        NamedDecl *ND = Result.getFoundDecl();
        if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
          if (TD->getUnderlyingType() == IntendedTy)
            IntendedTy = S.Context.getTypedefType(TD);
      }
    }
  }

  // Special-case some of Darwin's platform-independence types by suggesting
  // casts to primitive types that are known to be large enough.
  bool ShouldNotPrintDirectly = false;
  StringRef CastTyName;
  if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
    QualType CastTy;
    std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
    if (!CastTy.isNull()) {
      // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
      // (long in ASTContext). Only complain to pedants.
      if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
          (AT.isSizeT() || AT.isPtrdiffT()) &&
          AT.matchesType(S.Context, CastTy))
        Match = ArgType::NoMatchPedantic;
      IntendedTy = CastTy;
      ShouldNotPrintDirectly = true;
    }
  }

  // We may be able to offer a FixItHint if it is a supported type.
  PrintfSpecifier fixedFS = FS;
  bool Success =
      fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());

  if (Success) {
    // Get the fix string from the fixed format specifier
    SmallString<16> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);

    if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match: llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      // In this case, the specifier is wrong and should be changed to match
      // the argument.
      EmitFormatDiagnostic(S.PDiag(Diag)
                               << AT.getRepresentativeTypeName(S.Context)
                               << IntendedTy << IsEnum << E->getSourceRange(),
                           E->getBeginLoc(),
                           /*IsStringLocation*/ false, SpecRange,
                           FixItHint::CreateReplacement(SpecRange, os.str()));
    } else {
      // The canonical type for formatting this value is different from the
      // actual type of the expression. (This occurs, for example, with Darwin's
      // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
      // should be printed as 'long' for 64-bit compatibility.)
      // Rather than emitting a normal format/argument mismatch, we want to
      // add a cast to the recommended type (and correct the format string
      // if necessary).
      SmallString<16> CastBuf;
      llvm::raw_svector_ostream CastFix(CastBuf);
      CastFix << "(";
      IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
      CastFix << ")";

      SmallVector<FixItHint,4> Hints;
      if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
        Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));

      if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
        // If there's already a cast present, just replace it.
        SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
        Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));

      } else if (!requiresParensToAddCast(E)) {
        // If the expression has high enough precedence,
        // just write the C-style cast.
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
      } else {
        // Otherwise, add parens around the expression as well as the cast.
        CastFix << "(";
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));

        SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
        Hints.push_back(FixItHint::CreateInsertion(After, ")"));
      }

      if (ShouldNotPrintDirectly) {
        // The expression has a type that should not be printed directly.
        // We extract the name from the typedef because we don't want to show
        // the underlying type in the diagnostic.
        StringRef Name;
        if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
          Name = TypedefTy->getDecl()->getName();
        else
          Name = CastTyName;
        unsigned Diag = Match == ArgType::NoMatchPedantic
                            ? diag::warn_format_argument_needs_cast_pedantic
                            : diag::warn_format_argument_needs_cast;
        EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
                                 << E->getSourceRange(),
                             E->getBeginLoc(), /*IsStringLocation=*/false,
                             SpecRange, Hints);
      } else {
        // In this case, the expression could be printed using a different
        // specifier, but we've decided that the specifier is probably correct
        // and we should cast instead. Just use the normal warning message.
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
      }
    }
  } else {
    const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
                                                   SpecifierLen);
    // Since the warning for passing non-POD types to variadic functions
    // was deferred until now, we emit a warning for non-POD
    // arguments here.
    switch (S.isValidVarArgType(ExprTy)) {
    case Sema::VAK_Valid:
    case Sema::VAK_ValidInCXX11: {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match: llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      EmitFormatDiagnostic(
          S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
                        << IsEnum << CSR << E->getSourceRange(),
          E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      break;
    }
    case Sema::VAK_Undefined:
    case Sema::VAK_MSVCUndefined:
      EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
                               << S.getLangOpts().CPlusPlus11 << ExprTy
                               << CallType
                               << AT.getRepresentativeTypeName(S.Context) << CSR
                               << E->getSourceRange(),
                           E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      // A string-like class may still be printable via '.c_str()'.
      checkForCStrMembers(AT, E);
      break;

    case Sema::VAK_Invalid:
      if (ExprTy->isObjCObjectType())
        EmitFormatDiagnostic(
            S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      else
        // FIXME: If this is an initializer list, suggest removing the braces
        // or inserting a cast to the target type.
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    // Remember that this vararg has already been diagnosed so later passes
    // don't warn about it again.
    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

// Callback handler for the scanf format-string parser; mirrors
// CheckPrintfHandler for the scanf family.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, bool hasVAListArg,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, hasVAListArg, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

// Diagnose a scanlist ('[...]') that is missing its closing bracket.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}
// Forward an invalid scanf conversion specifier to the shared handler, which
// emits the diagnostic (and possibly a fix-it) common to printf and scanf.
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

// Check a single scanf conversion specifier: positional-argument consistency,
// zero field widths, length modifier / conversion specifier combinations, and
// (when the arguments are visible) that the corresponding data argument's type
// matches what the specifier expects. Returns false to abort further parsing
// of the format string.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument.  These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero; a constant zero width is diagnosed
  // and a fix-it removing it is offered.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (HasVAListArg)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // The type doesn't match; try to fix the specifier so we can suggest a
  // replacement in the diagnostic.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    // Could not fix the specifier; emit the mismatch warning without a fix-it.
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}

// Parse and check a single format-string literal: screens out wide literals,
// non-null-terminated (truncated) literals, and empty format strings, then
// dispatches to the printf- or scanf-family handler for the given Type.
static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
                              const Expr *OrigFormatExpr,
                              ArrayRef<const Expr *> Args,
                              bool HasVAListArg, unsigned format_idx,
                              unsigned firstDataArg,
                              Sema::FormatStringType Type,
                              bool inFunctionCall,
                              Sema::VariadicCallType CallType,
                              llvm::SmallBitVector &CheckedVarArgs,
                              UncoveredArgHandler &UncoveredArg,
                              bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string.  NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Effective length: the smaller of the declared array size (minus the
  // terminator slot) and the literal's own length.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
  if (StrLen == 0 && numDataArgs > 0) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
      Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
      Type == Sema::FST_OSTrace) {
    CheckPrintfHandler H(
        S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
        (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
        HasVAListArg, Args, format_idx, inFunctionCall, CallType,
        CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
                                                  S.getLangOpts(),
                                                  S.Context.getTargetInfo(),
                                            Type == Sema::FST_FreeBSDKPrintf))
      H.DoneProcessing();
  } else if (Type == Sema::FST_Scanf) {
    CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
                        numDataArgs, Str, HasVAListArg, Args, format_idx,
                        inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);

    if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
                                                 S.getLangOpts(),
                                                 S.Context.getTargetInfo()))
      H.DoneProcessing();
  } // TODO: handle other formats
}

// Returns true if the given string literal contains a '%s'-style conversion,
// taking truncated declarations into account (see CheckFormatString above).
bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
  // Str - The format string.  NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}

// Returns the argument type of the absolute value function, or a null
// QualType if the builtin ID is 0 or its signature is unavailable.
static QualType getAbsoluteValueArgumentType(ASTContext &Context,
                                             unsigned AbsType) {
  if (AbsType == 0)
    return QualType();

  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
  if (Error != ASTContext::GE_None)
    return QualType();

  const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
  if (!FT)
    return QualType();

  if (FT->getNumParams() != 1)
    return QualType();

  return FT->getParamType(0);
}

// Returns the best absolute value function, or zero, based on type and
// current absolute value function. Walks the "larger" chain starting at
// AbsFunctionKind and picks the first function whose parameter is at least as
// wide as ArgType, preferring an exact type match.
static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
                                   unsigned AbsFunctionKind) {
  unsigned BestKind = 0;
  uint64_t ArgSize = Context.getTypeSize(ArgType);
  for (unsigned Kind = AbsFunctionKind; Kind != 0;
       Kind = getLargerAbsoluteValueFunction(Kind)) {
    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
    if (Context.getTypeSize(ParamType) >= ArgSize) {
      if (BestKind == 0)
        BestKind = Kind;
      else if (Context.hasSameType(ParamType, ArgType)) {
        BestKind = Kind;
        break;
      }
    }
  }
  return BestKind;
}

// Classification of the value domain an absolute value function operates on.
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
};

static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
}

// Changes the absolute value function to a different type. Preserves whether
// the function is a builtin.
10377 static unsigned changeAbsFunction(unsigned AbsKind, 10378 AbsoluteValueKind ValueKind) { 10379 switch (ValueKind) { 10380 case AVK_Integer: 10381 switch (AbsKind) { 10382 default: 10383 return 0; 10384 case Builtin::BI__builtin_fabsf: 10385 case Builtin::BI__builtin_fabs: 10386 case Builtin::BI__builtin_fabsl: 10387 case Builtin::BI__builtin_cabsf: 10388 case Builtin::BI__builtin_cabs: 10389 case Builtin::BI__builtin_cabsl: 10390 return Builtin::BI__builtin_abs; 10391 case Builtin::BIfabsf: 10392 case Builtin::BIfabs: 10393 case Builtin::BIfabsl: 10394 case Builtin::BIcabsf: 10395 case Builtin::BIcabs: 10396 case Builtin::BIcabsl: 10397 return Builtin::BIabs; 10398 } 10399 case AVK_Floating: 10400 switch (AbsKind) { 10401 default: 10402 return 0; 10403 case Builtin::BI__builtin_abs: 10404 case Builtin::BI__builtin_labs: 10405 case Builtin::BI__builtin_llabs: 10406 case Builtin::BI__builtin_cabsf: 10407 case Builtin::BI__builtin_cabs: 10408 case Builtin::BI__builtin_cabsl: 10409 return Builtin::BI__builtin_fabsf; 10410 case Builtin::BIabs: 10411 case Builtin::BIlabs: 10412 case Builtin::BIllabs: 10413 case Builtin::BIcabsf: 10414 case Builtin::BIcabs: 10415 case Builtin::BIcabsl: 10416 return Builtin::BIfabsf; 10417 } 10418 case AVK_Complex: 10419 switch (AbsKind) { 10420 default: 10421 return 0; 10422 case Builtin::BI__builtin_abs: 10423 case Builtin::BI__builtin_labs: 10424 case Builtin::BI__builtin_llabs: 10425 case Builtin::BI__builtin_fabsf: 10426 case Builtin::BI__builtin_fabs: 10427 case Builtin::BI__builtin_fabsl: 10428 return Builtin::BI__builtin_cabsf; 10429 case Builtin::BIabs: 10430 case Builtin::BIlabs: 10431 case Builtin::BIllabs: 10432 case Builtin::BIfabsf: 10433 case Builtin::BIfabs: 10434 case Builtin::BIfabsl: 10435 return Builtin::BIcabsf; 10436 } 10437 } 10438 llvm_unreachable("Unable to convert function"); 10439 } 10440 10441 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 10442 const IdentifierInfo *FnInfo = 
FDecl->getIdentifier(); 10443 if (!FnInfo) 10444 return 0; 10445 10446 switch (FDecl->getBuiltinID()) { 10447 default: 10448 return 0; 10449 case Builtin::BI__builtin_abs: 10450 case Builtin::BI__builtin_fabs: 10451 case Builtin::BI__builtin_fabsf: 10452 case Builtin::BI__builtin_fabsl: 10453 case Builtin::BI__builtin_labs: 10454 case Builtin::BI__builtin_llabs: 10455 case Builtin::BI__builtin_cabs: 10456 case Builtin::BI__builtin_cabsf: 10457 case Builtin::BI__builtin_cabsl: 10458 case Builtin::BIabs: 10459 case Builtin::BIlabs: 10460 case Builtin::BIllabs: 10461 case Builtin::BIfabs: 10462 case Builtin::BIfabsf: 10463 case Builtin::BIfabsl: 10464 case Builtin::BIcabs: 10465 case Builtin::BIcabsf: 10466 case Builtin::BIcabsl: 10467 return FDecl->getBuiltinID(); 10468 } 10469 llvm_unreachable("Unknown Builtin type"); 10470 } 10471 10472 // If the replacement is valid, emit a note with replacement function. 10473 // Additionally, suggest including the proper header if not already included. 
// Emit a note suggesting the replacement absolute value function (std::abs in
// C++ for non-complex arguments, otherwise the builtin named by AbsKind), plus
// a note to include the declaring header when the function is not visible in
// the current scope.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  const char *FunctionName = nullptr;
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      if (R.isSingleResult()) {
        // If the name resolves to something other than the builtin we want to
        // suggest, the fix-it would be wrong; bail out silently.
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}

// Returns true if FDecl is a function named Str declared in namespace std.
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}

// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  // ArgType is the type of the expression being passed (casts stripped);
  // ParamType is the type the call actually converts it to.
  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative.  Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    const char *FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind.  Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    // The argument is wider than the parameter: warn and suggest a larger
    // function from the same family, if one exists.
    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used.  Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
// Warn on 'std::max<unsigned-type>(0, x)' (or with the zero second): the zero
// argument is useless since the other argument can never be below zero.
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}

//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  // Warn, then offer two alternative fix-its: move the closing paren to just
  // after the LHS (the likely intent), or cast the comparison to size_t to
  // silence the warning.
  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields.  If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      IsContained = true;
      return ContainedRD;
    }
  }

  return nullptr;
}

// Returns E as a sizeof unary expression, or null if it is not one.
static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
  if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
    if (Unary->getKind() == UETT_SizeOf)
      return Unary;
  return nullptr;
}

/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL.
static const Expr *getSizeOfExprArg(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    if (!SizeOf->isArgumentType())
      return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
  return nullptr;
}

/// If E is a sizeof expression, returns its argument type.
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

// Visitor that walks a record type's fields and emits a note for each field
// that is non-trivial to default-initialize (ARC strong/weak members).
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    // Arrays are visited via their element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: diagnose all problematic fields of record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

// Visitor that walks a record type's fields and emits a note for each field
// that is non-trivial to copy (ARC strong/weak members).
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    // Arrays are visited via their element type.
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: diagnose all problematic fields of record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;
  Sema &S;
};

}

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  // Recurse through '*' and '+' so expressions like 'n * sizeof(T)' count.
  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}

/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}

/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  // Only memset and bzero have a size argument that can be confused with the
  // fill value.
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  // True iff E is a literal 0 (after stripping implicit casts by the caller).
  auto isLiteralZero = [](const Expr *E) {
    return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Skip sizes spelled via a macro argument, which may legitimately be 0.
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // Don't warn on 'memset(p, 0, 0)': both trailing zeros makes the
      // transposition theory moot.
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}

/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset.  Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // Number of leading pointer arguments to inspect below (one for the
  // single-buffer functions, two for the copy/compare functions), and the
  // index of the length argument.
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID.
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                << ReadableName
                                << PointeeTy
                                << DestTy
                                << DSR
                                << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                         PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                << ActionIdx
                                << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                << FnName << SizeOfArgTy << ArgIdx
                                << PointeeTy << Dest->getSourceRange()
                                << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    // Neither a pointer nor an array: nothing to check for this argument.
    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                            << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                            << IsContained << ContainedRD << OperationType
                            << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::warn_arc_object_memaccess)
          << ArgIdx << FnName << PointeeTy
          << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // Non-trivial C structs (e.g. with ARC members) should not be bulk
      // zeroed or copied; point out the offending fields in a note.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // All paths that emitted a warning fall through to this silencing note.
    DiagRuntimeBehavior(
      Dest->getExprLoc(), Dest,
      PDiag(diag::note_bad_memaccess_silence)
        << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
11173 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 11174 Ex = Ex->IgnoreParenCasts(); 11175 11176 while (true) { 11177 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 11178 if (!BO || !BO->isAdditiveOp()) 11179 break; 11180 11181 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 11182 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 11183 11184 if (isa<IntegerLiteral>(RHS)) 11185 Ex = LHS; 11186 else if (isa<IntegerLiteral>(LHS)) 11187 Ex = RHS; 11188 else 11189 break; 11190 } 11191 11192 return Ex; 11193 } 11194 11195 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 11196 ASTContext &Context) { 11197 // Only handle constant-sized or VLAs, but not flexible members. 11198 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 11199 // Only issue the FIXIT for arrays of size > 1. 11200 if (CAT->getSize().getSExtValue() <= 1) 11201 return false; 11202 } else if (!Ty->isVariableArrayType()) { 11203 return false; 11204 } 11205 return true; 11206 } 11207 11208 // Warn if the user has made the 'size' argument to strlcpy or strlcat 11209 // be the size of the source, instead of the destination. 
11210 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11211 IdentifierInfo *FnName) { 11212 11213 // Don't crash if the user has the wrong number of arguments 11214 unsigned NumArgs = Call->getNumArgs(); 11215 if ((NumArgs != 3) && (NumArgs != 4)) 11216 return; 11217 11218 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11219 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11220 const Expr *CompareWithSrc = nullptr; 11221 11222 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11223 Call->getBeginLoc(), Call->getRParenLoc())) 11224 return; 11225 11226 // Look for 'strlcpy(dst, x, sizeof(x))' 11227 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11228 CompareWithSrc = Ex; 11229 else { 11230 // Look for 'strlcpy(dst, x, strlen(x))' 11231 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11232 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11233 SizeCall->getNumArgs() == 1) 11234 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11235 } 11236 } 11237 11238 if (!CompareWithSrc) 11239 return; 11240 11241 // Determine if the argument to sizeof/strlen is equal to the source 11242 // argument. 
In principle there's all kinds of things you could do 11243 // here, for instance creating an == expression and evaluating it with 11244 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11245 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11246 if (!SrcArgDRE) 11247 return; 11248 11249 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11250 if (!CompareWithSrcDRE || 11251 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11252 return; 11253 11254 const Expr *OriginalSizeArg = Call->getArg(2); 11255 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11256 << OriginalSizeArg->getSourceRange() << FnName; 11257 11258 // Output a FIXIT hint if the destination is an array (rather than a 11259 // pointer to an array). This could be enhanced to handle some 11260 // pointers if we know the actual size, like if DstArg is 'array+2' 11261 // we could say 'sizeof(array)-2'. 11262 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11263 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11264 return; 11265 11266 SmallString<128> sizeString; 11267 llvm::raw_svector_ostream OS(sizeString); 11268 OS << "sizeof("; 11269 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11270 OS << ")"; 11271 11272 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11273 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11274 OS.str()); 11275 } 11276 11277 /// Check if two expressions refer to the same declaration. 
static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
  // Either expression may be null; dyn_cast_or_null handles that.
  if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
    if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
      return D1->getDecl() == D2->getDecl();
  return false;
}

// If \p E is a call to strlen with exactly one argument, return that
// argument (with parens/casts stripped); otherwise return null.
static const Expr *getStrlenExprArg(const Expr *E) {
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
      return nullptr;
    return CE->getArg(0)->IgnoreParenCasts();
  }
  return nullptr;
}

// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  // PatternType 1: size derived from the destination; 2: from the source.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Build the suggested replacement: sizeof(dst) - strlen(dst) - 1.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
    << FixItHint::CreateReplacement(SR, OS.str());
}

namespace {
// Emit the 'freeing non-heap object' warning when \p D is a field, function,
// or variable declaration.
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
                                const UnaryOperator *UnaryExpr, const Decl *D) {
  if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
    S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
        << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
    return;
  }
}

// Handle 'free(&x)' / 'free(&s.m)': taking the address of a declaration is
// never a heap pointer (references are exempted below).
void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
                                 const UnaryOperator *UnaryExpr) {
  if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
    const Decl *D = Lvalue->getDecl();
    if (isa<DeclaratorDecl>(D))
      // A reference may bind to a heap object, so don't warn on those.
      if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
        return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
  }

  if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
    return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
                                      Lvalue->getMemberDecl());
}

void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
                            const UnaryOperator
                                *UnaryExpr) {
  // 'free(+[]{ ... })': unary plus converts a captureless lambda to a
  // function pointer, which is never heap-allocated.
  const auto *Lambda = dyn_cast<LambdaExpr>(
      UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
  if (!Lambda)
    return;

  S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 2 /*object: lambda expression*/;
}

// Handle freeing an array variable, which decays to a non-heap pointer.
void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
                                  const DeclRefExpr *Lvalue) {
  const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
  if (Var == nullptr)
    return;

  S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << Var;
}

// Handle suspicious casts in the argument: function pointers cast to object
// pointers, and integer literals cast to pointers.
void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
                            const CastExpr *Cast) {
  SmallString<128> SizeString;
  llvm::raw_svector_ostream OS(SizeString);

  clang::CastKind Kind = Cast->getCastKind();
  // Only warn on bit-casts from function pointers; other bit-casts are fine.
  if (Kind == clang::CK_BitCast &&
      !Cast->getSubExpr()->getType()->isFunctionPointerType())
    return;
  // Only warn on integer-to-pointer casts of literal constants.
  if (Kind == clang::CK_IntegralToPointer &&
      !isa<IntegerLiteral>(
          Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
    return;

  switch (Cast->getCastKind()) {
  case clang::CK_BitCast:
  case clang::CK_IntegralToPointer:
  case clang::CK_FunctionToPointerDecay:
    OS << '\'';
    Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
    OS << '\'';
    break;
  default:
    return;
  }

  S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << OS.str();
}
} // namespace

/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
  const std::string CalleeName =
      cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();

  { // Prefer something that doesn't involve a cast to make things simpler.
    const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
    if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
      switch (UnaryExpr->getOpcode()) {
      case UnaryOperator::Opcode::UO_AddrOf:
        return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
      case UnaryOperator::Opcode::UO_Plus:
        return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
      default:
        break;
      }

    if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
      if (Lvalue->getType()->isArrayType())
        return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);

    // Address-of-label ('&&label', a GNU extension) is never heap memory.
    if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
      Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
      return;
    }

    if (isa<BlockExpr>(Arg)) {
      Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 1 /*object: block*/;
      return;
    }
  }
  // Maybe the cast was important, check after the other cases.
  if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
    return CheckFreeArgumentsCast(*this, CalleeName, Cast);
}

void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                         SourceLocation ReturnLoc,
                         bool isObjCMethod,
                         const AttrVec *Attrs,
                         const FunctionDecl *FD) {
  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(Context, lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
      << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto
        = FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
          << FD << getLangOpts().CPlusPlus11;
    }
  }

  // PPC MMA non-pointer types are not allowed as return type. Checking the type
  // here prevent the user from using a PPC MMA type as trailing return type.
  if (Context.getTargetInfo().getTriple().isPPC64())
    CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}

/// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
                                BinaryOperatorKind Opcode) {
  // Match and capture subexpressions such as "(float) X == 0.1".
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  // Captures the literal and the cast when one side is a floating literal
  // and the other a cast expression, in either order.
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      // Round the literal into the (narrower) source type and see whether
      // any precision is lost.
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  //  represented by APFloat.  In such cases, do not emit a warning.  This
  //  is a heuristic: often comparison against such literals are used to
  //  detect if a value in a variable has not changed.  This clearly can
  //  lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
    << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Reduce vector/complex/atomic types to their element/value type.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
                    L.NonNegative && R.NonNegative);
  }

  /// Return the range of a bitwise-AND of the two ranges.
  static IntRange bit_and(IntRange L, IntRange R) {
    // A non-negative operand bounds both the width and the sign of the
    // result.
    unsigned Bits = std::max(L.Width, R.Width);
    bool NonNegative = false;
    if (L.NonNegative) {
      Bits = std::min(Bits, L.Width);
      NonNegative = true;
    }
    if (R.NonNegative) {
      Bits = std::min(Bits, R.Width);
      NonNegative = true;
    }
    return IntRange(Bits, NonNegative);
  }

  /// Return the range of a sum of the two ranges.
  static IntRange sum(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a difference of the two ranges.
  static IntRange difference(IntRange L, IntRange R) {
    // We need a 1-bit-wider range if:
    //   1) LHS can be negative: least value can be reduced.
    //   2) RHS can be negative: greatest value can be increased.
    bool CanWiden = !L.NonNegative || !R.NonNegative;
    // The result is only non-negative when the RHS is known to be zero
    // (width 0), since subtracting any positive value could go negative.
    bool Unsigned = L.NonNegative && R.Width == 0;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
                    !Unsigned,
                    Unsigned);
  }

  /// Return the range of a product of the two ranges.
  static IntRange product(IntRange L, IntRange R) {
    // If both LHS and RHS can be negative, we can form
    //   -2^L * -2^R = 2^(L + R)
    // which requires L + R + 1 value bits to represent.
    bool CanWiden = !L.NonNegative && !R.NonNegative;
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a remainder operation between the two ranges.
  static IntRange rem(IntRange L, IntRange R) {
    // The result of a remainder can't be larger than the result of
    // either side. The sign of the result is the sign of the LHS.
    bool Unsigned = L.NonNegative;
    return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
                    Unsigned);
  }
};

} // namespace

// Compute the range occupied by a known constant value, truncating it to
// MaxWidth bits if it is non-negative and wider than that.
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

// Compute the range of a constant APValue: int, vector of ints, or complex
// int; anything else conservatively uses all MaxWidth bits.
static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    // Join the ranges of all vector elements.
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case.  It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

// Return the expression's type, looking through _Atomic to the value type.
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
///        particular, assume that arithmetic on narrower types doesn't leave
///        those types. If \c false, return a range including all possible
///        result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
                             bool InConstantContext, bool Approximate) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C, InConstantContext))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) {
    // No-op and lvalue-to-rvalue casts don't change the value range.
    if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
      return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);

    IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE));

    bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
                         CE->getCastKind() == CK_BooleanToSignedIntegral;

    // Assume that non-integer casts can span the full range of the type.
    if (!isIntegerCast)
      return OutputTypeRange;

    IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
                                     std::min(MaxWidth, OutputTypeRange.Width),
                                     InConstantContext, Approximate);

    // Bail out if the subexpr's range is as wide as the cast type.
    if (SubRange.Width >= OutputTypeRange.Width)
      return OutputTypeRange;

    // Otherwise, we take the smaller width, and we're non-negative if
    // either the output type or the subexpr is.
    return IntRange(SubRange.Width,
                    SubRange.NonNegative || OutputTypeRange.NonNegative);
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    // If we can fold the condition, just take that operand.
    bool CondResult;
    if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
      return GetExprRange(C,
                          CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
                          MaxWidth, InConstantContext, Approximate);

    // Otherwise, conservatively merge.
    // GetExprRange requires an integer expression, but a throw expression
    // results in a void type.
    Expr *E = CO->getTrueExpr();
    IntRange L = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    E = CO->getFalseExpr();
    IntRange R = E->getType()->isVoidType()
                     ? IntRange{0, true}
                     : GetExprRange(C, E, MaxWidth, InConstantContext,
                                    Approximate);
    return IntRange::join(L, R);
  }

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    // The default combiner is the conservative merge of the operand ranges;
    // specific opcodes below either return directly or pick a tighter one.
    IntRange (*Combine)(IntRange, IntRange) = IntRange::join;

    switch (BO->getOpcode()) {
    case BO_Cmp:
      llvm_unreachable("builtin <=> should have class type");

    // Boolean-valued operations are single-bit and positive.
    case BO_LAnd:
    case BO_LOr:
    case BO_LT:
    case BO_GT:
    case BO_LE:
    case BO_GE:
    case BO_EQ:
    case BO_NE:
      return IntRange::forBoolType();

    // The type of the assignments is the type of the LHS, so the RHS
    // is not necessarily the same type.
    case BO_MulAssign:
    case BO_DivAssign:
    case BO_RemAssign:
    case BO_AddAssign:
    case BO_SubAssign:
    case BO_XorAssign:
    case BO_OrAssign:
      // TODO: bitfields?
      return IntRange::forValueOfType(C, GetExprType(E));

    // Simple assignments just pass through the RHS, which will have
    // been coerced to the LHS type.
    case BO_Assign:
      // TODO: bitfields?
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    // Operations with opaque sources are black-listed.
    case BO_PtrMemD:
    case BO_PtrMemI:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Bitwise-and uses the *infimum* of the two source ranges.
    case BO_And:
    case BO_AndAssign:
      Combine = IntRange::bit_and;
      break;

    // Left shift gets black-listed based on a judgement call.
    case BO_Shl:
      // ...except that we want to treat '1 << (blah)' as logically
      // positive.  It's an important idiom.
      if (IntegerLiteral *I
            = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
        if (I->getValue() == 1) {
          IntRange R = IntRange::forValueOfType(C, GetExprType(E));
          return IntRange(R.Width, /*NonNegative*/ true);
        }
      }
      LLVM_FALLTHROUGH;

    case BO_ShlAssign:
      return IntRange::forValueOfType(C, GetExprType(E));

    // Right shift by a constant can narrow its left argument.
    case BO_Shr:
    case BO_ShrAssign: {
      IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext,
                                Approximate);

      // If the shift amount is a positive constant, drop the width by
      // that much.
      if (Optional<llvm::APSInt> shift =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        if (shift->isNonNegative()) {
          unsigned zext = shift->getZExtValue();
          if (zext >= L.Width)
            L.Width = (L.NonNegative ? 0 : 1);
          else
            L.Width -= zext;
        }
      }

      return L;
    }

    // Comma acts as its right operand.
    case BO_Comma:
      return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext,
                          Approximate);

    case BO_Add:
      if (!Approximate)
        Combine = IntRange::sum;
      break;

    case BO_Sub:
      // Pointer subtraction yields ptrdiff_t-like values; treat it as
      // spanning the full result type.
      if (BO->getLHS()->getType()->isPointerType())
        return IntRange::forValueOfType(C, GetExprType(E));
      if (!Approximate)
        Combine = IntRange::difference;
      break;

    case BO_Mul:
      if (!Approximate)
        Combine = IntRange::product;
      break;

    // The width of a division result is mostly determined by the size
    // of the LHS.
    case BO_Div: {
      // Don't 'pre-truncate' the operands.
      unsigned opWidth = C.getIntWidth(GetExprType(E));
      IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext,
                                Approximate);

      // If the divisor is constant, use that.
      if (Optional<llvm::APSInt> divisor =
              BO->getRHS()->getIntegerConstantExpr(C)) {
        unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
        if (log2 >= L.Width)
          L.Width = (L.NonNegative ? 0 : 1);
        else
          L.Width = std::min(L.Width - log2, MaxWidth);
        return L;
      }

      // Otherwise, just use the LHS's width.
      // FIXME: This is wrong if the LHS could be its minimal value and the RHS
      // could be -1.
      IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext,
                                Approximate);
      return IntRange(L.Width, L.NonNegative && R.NonNegative);
    }

    case BO_Rem:
      Combine = IntRange::rem;
      break;

    // The default behavior is okay for these.
    case BO_Xor:
    case BO_Or:
      break;
    }

    // Combine the two ranges, but limit the result to the type in which we
    // performed the computation.
    QualType T = GetExprType(E);
    unsigned opWidth = C.getIntWidth(T);
    IntRange L =
        GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate);
    IntRange R =
        GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate);
    IntRange C = Combine(L, R);
    C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
    C.Width = std::min(C.Width, MaxWidth);
    return C;
  }

  if (const auto *UO = dyn_cast<UnaryOperator>(E)) {
    switch (UO->getOpcode()) {
    // Boolean-valued operations are white-listed.
    case UO_LNot:
      return IntRange::forBoolType();

    // Operations with opaque sources are black-listed.
    case UO_Deref:
    case UO_AddrOf: // should be impossible
      return IntRange::forValueOfType(C, GetExprType(E));

    default:
      return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext,
                          Approximate);
    }
  }

  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext,
                        Approximate);

  // A bit-field access is bounded by the declared bit width.
  if (const auto *BitField = E->getSourceBitField())
    return IntRange(BitField->getBitWidthValue(C),
                    BitField->getType()->isUnsignedIntegerOrEnumerationType());

  return IntRange::forValueOfType(C, GetExprType(E));
}

/// Convenience overload: use the expression's own type width as MaxWidth.
static IntRange GetExprRange(ASTContext &C, const Expr *E,
                             bool InConstantContext, bool Approximate) {
  return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext,
                      Approximate);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
static bool IsSameFloatAfterCast(const llvm::APFloat &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  llvm::APFloat truncated = value;

  // Round-trip Src -> Tgt and compare bit patterns with the original.
  bool ignored;
  truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
  truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);

  return truncated.bitwiseIsEqual(value);
}

/// Checks whether the given value, which currently has the given
/// source semantics, has the same value when coerced through the
/// target semantics.
///
/// The value might be a vector of floats (or a complex number).
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  if (value.isVector()) {
    // Every element must survive the round-trip.
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

// Defined later in this file.
static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);

/// Returns true if the comparison constant \p E is an enum constant, or is
/// expanded from a macro other than a boolean-literal macro. Used to suppress
/// tautological-comparison diagnostics for such constants.
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
          dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}

/// Whether the expression is of integer type and either its declared type or
/// its unconverted (pre-implicit-cast) type is unsigned.
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type. In general this has the
/// following structure:
///
///     |-----------| . . . |-----------|
///     ^           ^       ^           ^
///    Min       HoleMin  HoleMax      Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  /// Build the promoted range for an expression whose pre-promotion range is
  /// \p R, promoted to a type of \p BitWidth bits with signedness \p Unsigned.
  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    if (R.Width == 0)
      // A zero-width range holds the single value 0.
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  // Primitive flags are bitwise-OR'd into the named combinations below,
  // which describe where a constant lies relative to this range.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  /// Classify where \p Value lies relative to this promoted range.
  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // Discontiguous case: the range wraps around, with a hole between
      // PromotedMax (HoleMin) and PromotedMin (HoleMax).
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  /// If comparing with \p Op against a constant classified as \p R always
  /// yields the same result, return the spelling of that result ("true",
  /// "false", or a std::strong_ordering value for <=>); otherwise None.
  static llvm::Optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return llvm::None;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      // Relational operators: normalize to the constant's point of view,
      // then flip for the non-strict forms.
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return llvm::None;
  }
};
}

/// Whether the expression has enumeration type once implicit integral
/// promotions are stripped off.
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}

static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

/// Diagnose comparisons whose result is fixed because the constant operand
/// lies outside (or at the edge of) the possible range of the non-constant
/// operand. Returns true if a diagnostic was emitted.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  // Keep the unstripped operand for the final diagnostic classification.
  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //   - If the constant is outside the range of representable values of
  //     the enumeration. In such a case, we should warn about the cast
  //     to enumeration type, not about the comparison.
  //   - If the constant is the maximum / minimum in-range value. For an
  //     enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // A comparison of an unsigned bit-field against 0 is really a type problem,
  // even though at the type level the bit-field might promote to 'signed int'.
  if (Other->refersToBitField() && InRange && Value == 0 &&
      Other->getType()->isUnsignedIntegerOrEnumerationType())
    TautologicalTypeCompare = true;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
               Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  if (!TautologicalTypeCompare) {
    S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
        << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
        << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
    return true;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {

    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    // In-range, non-boolean case: pick the most specific diagnostic for
    // "unsigned X compared against 0" versus a generic tautology.
    bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
    unsigned Diag =
        (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
            ? (HasEnumType(OriginalOther)
                   ? diag::warn_unsigned_enum_always_true_comparison
                   : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
                              : diag::warn_unsigned_always_true_comparison)
            : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}

/// Analyze the operands of the given comparison.  Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}

/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);

  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  if (T->isIntegralType(S.Context)) {
    Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context);
    Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context);

    // We don't care about expressions whose result is a constant.
    if (RHSValue && LHSValue)
      return AnalyzeImpConvsInComparison(S, E);

    // We only care about expressions where just one side is literal
    if ((bool)RHSValue ^ (bool)LHSValue) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = (bool)RHSValue;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;

      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
    }
  }

  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison:  we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();

  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  }

  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
  } else {
    // Both operands are unsigned; nothing further to check here.
    return AnalyzeImpConvsInComparison(S, E);
  }

  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange = GetExprRange(
      S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);

  // Go ahead and analyze implicit conversions in the operands.  Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with a overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
                     /*Approximate*/ true);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    // If the unsigned constant needs fewer bits than the width the comparison
    // is performed in, it cannot collide with any reinterpreted (overflowed)
    // signed value, so the comparison result is unaffected; stay quiet.
    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  // Otherwise warn: a negative signed operand will be reinterpreted as a
  // large unsigned value by the comparison.
  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}

/// Analyzes an attempt to assign the given value to a bitfield.
///
/// \param S the Sema instance used to emit diagnostics.
/// \param Bitfield the bit-field being assigned to; must satisfy
///        isBitField().
/// \param Init the initializer/assigned value.
/// \param InitLoc the location at which to report problems.
///
/// Returns true if there was something fishy about the attempt (a constant
/// value that does not fit in the field); false otherwise, including when
/// the value could not be evaluated.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contains only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  // NOTE(review): this repeats the isBooleanType() check performed on
  // BitfieldType above; one of the two appears redundant.
  if (Bitfield->getType()->isBooleanType())
    return false;

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;

      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness. If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
      }

      if (DiagID) {
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      }

      // Compute the required bitwidth. If the enum has negative values, we need
      // one more bit than the normal number of positive bits to represent the
      // sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();

      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
            << Bitfield << ED;
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
      }
    }

    // A non-constant RHS is never "fishy" for the caller; any enum-width
    // problems were already diagnosed above.
    return false;
  }

  llvm::APSInt Value = Result.Val.getInt();

  unsigned OriginalWidth = Value.getBitWidth();

  // For a negated or complemented operand, measure the minimum signed width
  // of the resulting value itself rather than the width of the expression's
  // type, so e.g. '-1' is not reported as wider than any bitfield.
  if (!Value.isSigned() || Value.isNegative())
    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
      if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
        OriginalWidth = Value.getMinSignedBits();

  if (OriginalWidth <= FieldWidth)
    return false;

  // Compute the value which the bitfield will contain.
  llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
  TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());

  // Check whether the stored value is equal to the original value.
  TruncatedValue = TruncatedValue.extend(OriginalWidth);
  if (llvm::APSInt::isSameValue(Value, TruncatedValue))
    return false;

  // Special-case bitfields of width 1: booleans are naturally 0/1, and
  // therefore don't strictly fit into a signed bitfield of width 1.
  if (FieldWidth == 1 && Value == 1)
    return false;

  std::string PrettyValue = toString(Value, 10);
  std::string PrettyTrunc = toString(TruncatedValue, 10);

  S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
      << PrettyValue << PrettyTrunc << OriginalInit->getType()
      << Init->getSourceRange();

  return true;
}

/// Analyze the given simple or compound assignment for warning-worthy
/// operations.
static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
  // Just recurse on the LHS.
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());

  // We want to recurse on the RHS as normal unless we're assigning to
  // a bitfield.
  if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
    if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
                                  E->getOperatorLoc())) {
      // Recurse, ignoring any implicit conversions on the RHS.
      return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
                                        E->getOperatorLoc());
    }
  }

  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
}

/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
12734 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12735 SourceLocation CContext, unsigned diag, 12736 bool pruneControlFlow = false) { 12737 if (pruneControlFlow) { 12738 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12739 S.PDiag(diag) 12740 << SourceType << T << E->getSourceRange() 12741 << SourceRange(CContext)); 12742 return; 12743 } 12744 S.Diag(E->getExprLoc(), diag) 12745 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12746 } 12747 12748 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12749 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12750 SourceLocation CContext, 12751 unsigned diag, bool pruneControlFlow = false) { 12752 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12753 } 12754 12755 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12756 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12757 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12758 } 12759 12760 static void adornObjCBoolConversionDiagWithTernaryFixit( 12761 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12762 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12763 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12764 Ignored = OVE->getSourceExpr(); 12765 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12766 isa<BinaryOperator>(Ignored) || 12767 isa<CXXOperatorCallExpr>(Ignored); 12768 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12769 if (NeedsParens) 12770 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12771 << FixItHint::CreateInsertion(EndLoc, ")"); 12772 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12773 } 12774 12775 /// Diagnose an implicit cast from a floating point value to an integer value. 
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  // Inside template instantiations, emit diagnostics via DiagRuntimeBehavior
  // (see the PruneWarnings uses of DiagnoseImpCast below) so that they only
  // fire for reachable code.
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
    E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  if (!IsConstant) {
    // Non-constant source: emit the generic warning without trying to print
    // the values involved.
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  // Convert the precision from binary to decimal digits, rounding up;
  // 59/196 approximates log10(2).
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  // An exact conversion loses nothing, so only the generic float-to-integer
  // warning (and nothing at all for literals) applies.
  if (Result == llvm::APFloat::opOK && isExact) {
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) { // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // A saturated result (max/min representable value) indicates the source
    // was out of range; anything else gets the generic warning.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else { // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic operations.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Render \p Value as a decimal string after truncating it to \p Range,
/// i.e. the value the expression actually takes within that range.
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}

/// Returns true if \p Ex is an implicit conversion involving bool and a
/// floating-point builtin type: to bool when \p ToBool is set, from bool
/// otherwise.
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn when a call argument is implicitly converted from floating-point to
/// bool while an adjacent argument is converted from bool to floating-point;
/// that pairing suggests the two arguments were swapped at the call site.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Check the neighbors for the opposite (bool -> float) conversion.
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Diagnose an implicit conversion of NULL or nullptr to a non-pointer
/// scalar type, offering a fix-it that replaces it with a zero literal.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  // Bail out early if the warning is disabled at this location.
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr::NullPointerConstantKind NullKind =
      E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
  if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

// Forward declarations for the mutually recursive collection-literal checks.
static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
///
/// \p ElementKind is forwarded into the diagnostic as a select index
/// (0 is used for array elements, 1 for dictionary keys, 2 for values
/// by the callers below).
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals with the same target element
  // type.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
13066 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13067 ObjCArrayLiteral *ArrayLiteral) { 13068 if (!S.NSArrayDecl) 13069 return; 13070 13071 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13072 if (!TargetObjCPtr) 13073 return; 13074 13075 if (TargetObjCPtr->isUnspecialized() || 13076 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13077 != S.NSArrayDecl->getCanonicalDecl()) 13078 return; 13079 13080 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13081 if (TypeArgs.size() != 1) 13082 return; 13083 13084 QualType TargetElementType = TypeArgs[0]; 13085 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13086 checkObjCCollectionLiteralElement(S, TargetElementType, 13087 ArrayLiteral->getElement(I), 13088 0); 13089 } 13090 } 13091 13092 /// Check an Objective-C dictionary literal being converted to the given 13093 /// target type. 13094 static void 13095 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13096 ObjCDictionaryLiteral *DictionaryLiteral) { 13097 if (!S.NSDictionaryDecl) 13098 return; 13099 13100 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13101 if (!TargetObjCPtr) 13102 return; 13103 13104 if (TargetObjCPtr->isUnspecialized() || 13105 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13106 != S.NSDictionaryDecl->getCanonicalDecl()) 13107 return; 13108 13109 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13110 if (TypeArgs.size() != 2) 13111 return; 13112 13113 QualType TargetKeyType = TypeArgs[0]; 13114 QualType TargetObjectType = TypeArgs[1]; 13115 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 13116 auto Element = DictionaryLiteral->getKeyValueElement(I); 13117 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 13118 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 13119 } 13120 } 13121 13122 // Helper function to filter out cases for constant width constant conversion. 
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
                                          SourceLocation CC) {
  // If initializing from a constant, and the constant starts with '0',
  // then it is a binary, octal, or hexadecimal. Allow these constants
  // to fill all the bits, even if there is a sign change.
  if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
    const char FirstLiteralCharacter =
        S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
    if (FirstLiteralCharacter == '0')
      return false;
  }

  // If the CC location points to a '{', and the type is char, then assume
  // it is an array initialization.
  if (CC.isValid() && T->isCharType()) {
    const char FirstContextCharacter =
        S.getSourceManager().getCharacterData(CC)[0];
    if (FirstContextCharacter == '{')
      return false;
  }

  return true;
}

/// If \p E is an integer literal, or a unary minus applied directly to an
/// integer literal, return the literal; otherwise return null.
static const IntegerLiteral *getIntegerLiteral(Expr *E) {
  const auto *IL = dyn_cast<IntegerLiteral>(E);
  if (!IL) {
    if (auto *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Minus)
        return dyn_cast<IntegerLiteral>(UO->getSubExpr());
    }
  }

  return IL;
}

/// Diagnose integer expressions used in a boolean context whose truth value
/// is constant or surprising: left shifts of/by constants and conditional
/// operators with non-0/1 constant arms.
static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
  E = E->IgnoreParenImpCasts();
  SourceLocation ExprLoc = E->getExprLoc();

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    BinaryOperator::Opcode Opc = BO->getOpcode();
    Expr::EvalResult Result;
    // Do not diagnose unsigned shifts.
    if (Opc == BO_Shl) {
      const auto *LHS = getIntegerLiteral(BO->getLHS());
      const auto *RHS = getIntegerLiteral(BO->getRHS());
      // '0 << n' is always false in a boolean context.
      if (LHS && LHS->getValue() == 0)
        S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
      // A fully constant shift has a known truth value; report it.
      else if (!E->isValueDependent() && LHS && RHS &&
               RHS->getValue().isNonNegative() &&
               E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
        S.Diag(ExprLoc, diag::warn_left_shift_always)
            << (Result.Val.getInt() != 0);
      // Otherwise, a signed left shift in boolean context is suspicious
      // (likely meant '<' or a comparison).
      else if (E->getType()->isSignedIntegerType())
        S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
    }
  }

  if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
    const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
    const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
    if (!LHS || !RHS)
      return;
    if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
        (RHS->getValue() == 0 || RHS->getValue() == 1))
      // Do not diagnose common idioms.
      return;
    // Both arms non-zero means the condition is irrelevant to the truth
    // value: the whole expression is always true in a boolean context.
    if (LHS->getValue() != 0 && RHS->getValue() != 0)
      S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
  }
}

static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC,
                                    bool *ICContext = nullptr,
                                    bool IsListInit = false) {
  if (E->isTypeDependent() || E->isValueDependent()) return;

  const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
  const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
  if (Source == Target) return;
  if (Target->isDependentType()) return;

  // If the conversion context location is invalid don't complain. We also
  // don't want to emit a warning if the issue occurs from the expansion of
  // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
Once we detect we are in that 13212 // scenario, we just return. 13213 if (CC.isInvalid()) 13214 return; 13215 13216 if (Source->isAtomicType()) 13217 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13218 13219 // Diagnose implicit casts to bool. 13220 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13221 if (isa<StringLiteral>(E)) 13222 // Warn on string literal to bool. Checks for string literals in logical 13223 // and expressions, for instance, assert(0 && "error here"), are 13224 // prevented by a check in AnalyzeImplicitConversions(). 13225 return DiagnoseImpCast(S, E, T, CC, 13226 diag::warn_impcast_string_literal_to_bool); 13227 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13228 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13229 // This covers the literal expressions that evaluate to Objective-C 13230 // objects. 13231 return DiagnoseImpCast(S, E, T, CC, 13232 diag::warn_impcast_objective_c_literal_to_bool); 13233 } 13234 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13235 // Warn on pointer to bool conversion that is always true. 13236 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13237 SourceRange(CC)); 13238 } 13239 } 13240 13241 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 13242 // is a typedef for signed char (macOS), then that constant value has to be 1 13243 // or 0. 
13244 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13245 Expr::EvalResult Result; 13246 if (E->EvaluateAsInt(Result, S.getASTContext(), 13247 Expr::SE_AllowSideEffects)) { 13248 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13249 adornObjCBoolConversionDiagWithTernaryFixit( 13250 S, E, 13251 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13252 << toString(Result.Val.getInt(), 10)); 13253 } 13254 return; 13255 } 13256 } 13257 13258 // Check implicit casts from Objective-C collection literals to specialized 13259 // collection types, e.g., NSArray<NSString *> *. 13260 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13261 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13262 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13263 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13264 13265 // Strip vector types. 13266 if (isa<VectorType>(Source)) { 13267 if (Target->isVLSTBuiltinType() && 13268 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13269 QualType(Source, 0)) || 13270 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13271 QualType(Source, 0)))) 13272 return; 13273 13274 if (!isa<VectorType>(Target)) { 13275 if (S.SourceMgr.isInSystemMacro(CC)) 13276 return; 13277 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13278 } 13279 13280 // If the vector cast is cast between two vectors of the same size, it is 13281 // a bitcast, not a conversion. 13282 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13283 return; 13284 13285 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13286 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13287 } 13288 if (auto VecTy = dyn_cast<VectorType>(Target)) 13289 Target = VecTy->getElementType().getTypePtr(); 13290 13291 // Strip complex types. 
13292 if (isa<ComplexType>(Source)) { 13293 if (!isa<ComplexType>(Target)) { 13294 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13295 return; 13296 13297 return DiagnoseImpCast(S, E, T, CC, 13298 S.getLangOpts().CPlusPlus 13299 ? diag::err_impcast_complex_scalar 13300 : diag::warn_impcast_complex_scalar); 13301 } 13302 13303 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13304 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13305 } 13306 13307 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13308 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13309 13310 // If the source is floating point... 13311 if (SourceBT && SourceBT->isFloatingPoint()) { 13312 // ...and the target is floating point... 13313 if (TargetBT && TargetBT->isFloatingPoint()) { 13314 // ...then warn if we're dropping FP rank. 13315 13316 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 13317 QualType(SourceBT, 0), QualType(TargetBT, 0)); 13318 if (Order > 0) { 13319 // Don't warn about float constants that are precisely 13320 // representable in the target type. 13321 Expr::EvalResult result; 13322 if (E->EvaluateAsRValue(result, S.Context)) { 13323 // Value might be a float, a float vector, or a float complex. 13324 if (IsSameFloatAfterCast(result.Val, 13325 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 13326 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 13327 return; 13328 } 13329 13330 if (S.SourceMgr.isInSystemMacro(CC)) 13331 return; 13332 13333 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 13334 } 13335 // ... or possibly if we're increasing rank, too 13336 else if (Order < 0) { 13337 if (S.SourceMgr.isInSystemMacro(CC)) 13338 return; 13339 13340 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 13341 } 13342 return; 13343 } 13344 13345 // If the target is integral, always warn. 
13346 if (TargetBT && TargetBT->isInteger()) { 13347 if (S.SourceMgr.isInSystemMacro(CC)) 13348 return; 13349 13350 DiagnoseFloatingImpCast(S, E, T, CC); 13351 } 13352 13353 // Detect the case where a call result is converted from floating-point to 13354 // to bool, and the final argument to the call is converted from bool, to 13355 // discover this typo: 13356 // 13357 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 13358 // 13359 // FIXME: This is an incredibly special case; is there some more general 13360 // way to detect this class of misplaced-parentheses bug? 13361 if (Target->isBooleanType() && isa<CallExpr>(E)) { 13362 // Check last argument of function call to see if it is an 13363 // implicit cast from a type matching the type the result 13364 // is being cast to. 13365 CallExpr *CEx = cast<CallExpr>(E); 13366 if (unsigned NumArgs = CEx->getNumArgs()) { 13367 Expr *LastA = CEx->getArg(NumArgs - 1); 13368 Expr *InnerE = LastA->IgnoreParenImpCasts(); 13369 if (isa<ImplicitCastExpr>(LastA) && 13370 InnerE->getType()->isBooleanType()) { 13371 // Warn on this floating-point to bool conversion 13372 DiagnoseImpCast(S, E, T, CC, 13373 diag::warn_impcast_floating_point_to_bool); 13374 } 13375 } 13376 } 13377 return; 13378 } 13379 13380 // Valid casts involving fixed point types should be accounted for here. 
13381 if (Source->isFixedPointType()) { 13382 if (Target->isUnsaturatedFixedPointType()) { 13383 Expr::EvalResult Result; 13384 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 13385 S.isConstantEvaluated())) { 13386 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 13387 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 13388 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 13389 if (Value > MaxVal || Value < MinVal) { 13390 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13391 S.PDiag(diag::warn_impcast_fixed_point_range) 13392 << Value.toString() << T 13393 << E->getSourceRange() 13394 << clang::SourceRange(CC)); 13395 return; 13396 } 13397 } 13398 } else if (Target->isIntegerType()) { 13399 Expr::EvalResult Result; 13400 if (!S.isConstantEvaluated() && 13401 E->EvaluateAsFixedPoint(Result, S.Context, 13402 Expr::SE_AllowSideEffects)) { 13403 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 13404 13405 bool Overflowed; 13406 llvm::APSInt IntResult = FXResult.convertToInt( 13407 S.Context.getIntWidth(T), 13408 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 13409 13410 if (Overflowed) { 13411 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13412 S.PDiag(diag::warn_impcast_fixed_point_range) 13413 << FXResult.toString() << T 13414 << E->getSourceRange() 13415 << clang::SourceRange(CC)); 13416 return; 13417 } 13418 } 13419 } 13420 } else if (Target->isUnsaturatedFixedPointType()) { 13421 if (Source->isIntegerType()) { 13422 Expr::EvalResult Result; 13423 if (!S.isConstantEvaluated() && 13424 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 13425 llvm::APSInt Value = Result.Val.getInt(); 13426 13427 bool Overflowed; 13428 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 13429 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 13430 13431 if (Overflowed) { 13432 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13433 S.PDiag(diag::warn_impcast_fixed_point_range) 13434 << 
toString(Value, /*Radix=*/10) << T 13435 << E->getSourceRange() 13436 << clang::SourceRange(CC)); 13437 return; 13438 } 13439 } 13440 } 13441 } 13442 13443 // If we are casting an integer type to a floating point type without 13444 // initialization-list syntax, we might lose accuracy if the floating 13445 // point type has a narrower significand than the integer type. 13446 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 13447 TargetBT->isFloatingType() && !IsListInit) { 13448 // Determine the number of precision bits in the source integer type. 13449 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 13450 /*Approximate*/ true); 13451 unsigned int SourcePrecision = SourceRange.Width; 13452 13453 // Determine the number of precision bits in the 13454 // target floating point type. 13455 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 13456 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13457 13458 if (SourcePrecision > 0 && TargetPrecision > 0 && 13459 SourcePrecision > TargetPrecision) { 13460 13461 if (Optional<llvm::APSInt> SourceInt = 13462 E->getIntegerConstantExpr(S.Context)) { 13463 // If the source integer is a constant, convert it to the target 13464 // floating point type. Issue a warning if the value changes 13465 // during the whole conversion. 
13466 llvm::APFloat TargetFloatValue( 13467 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 13468 llvm::APFloat::opStatus ConversionStatus = 13469 TargetFloatValue.convertFromAPInt( 13470 *SourceInt, SourceBT->isSignedInteger(), 13471 llvm::APFloat::rmNearestTiesToEven); 13472 13473 if (ConversionStatus != llvm::APFloat::opOK) { 13474 SmallString<32> PrettySourceValue; 13475 SourceInt->toString(PrettySourceValue, 10); 13476 SmallString<32> PrettyTargetValue; 13477 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 13478 13479 S.DiagRuntimeBehavior( 13480 E->getExprLoc(), E, 13481 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 13482 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13483 << E->getSourceRange() << clang::SourceRange(CC)); 13484 } 13485 } else { 13486 // Otherwise, the implicit conversion may lose precision. 13487 DiagnoseImpCast(S, E, T, CC, 13488 diag::warn_impcast_integer_float_precision); 13489 } 13490 } 13491 } 13492 13493 DiagnoseNullConversion(S, E, T, CC); 13494 13495 S.DiscardMisalignedMemberAddress(Target, E); 13496 13497 if (Target->isBooleanType()) 13498 DiagnoseIntInBoolContext(S, E); 13499 13500 if (!Source->isIntegerType() || !Target->isIntegerType()) 13501 return; 13502 13503 // TODO: remove this early return once the false positives for constant->bool 13504 // in templates, macros, etc, are reduced or removed. 
13505 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13506 return; 13507 13508 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13509 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13510 return adornObjCBoolConversionDiagWithTernaryFixit( 13511 S, E, 13512 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13513 << E->getType()); 13514 } 13515 13516 IntRange SourceTypeRange = 13517 IntRange::forTargetOfCanonicalType(S.Context, Source); 13518 IntRange LikelySourceRange = 13519 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13520 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13521 13522 if (LikelySourceRange.Width > TargetRange.Width) { 13523 // If the source is a constant, use a default-on diagnostic. 13524 // TODO: this should happen for bitfield stores, too. 13525 Expr::EvalResult Result; 13526 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13527 S.isConstantEvaluated())) { 13528 llvm::APSInt Value(32); 13529 Value = Result.Val.getInt(); 13530 13531 if (S.SourceMgr.isInSystemMacro(CC)) 13532 return; 13533 13534 std::string PrettySourceValue = toString(Value, 10); 13535 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13536 13537 S.DiagRuntimeBehavior( 13538 E->getExprLoc(), E, 13539 S.PDiag(diag::warn_impcast_integer_precision_constant) 13540 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13541 << E->getSourceRange() << SourceRange(CC)); 13542 return; 13543 } 13544 13545 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
13546 if (S.SourceMgr.isInSystemMacro(CC)) 13547 return; 13548 13549 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13550 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13551 /* pruneControlFlow */ true); 13552 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13553 } 13554 13555 if (TargetRange.Width > SourceTypeRange.Width) { 13556 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13557 if (UO->getOpcode() == UO_Minus) 13558 if (Source->isUnsignedIntegerType()) { 13559 if (Target->isUnsignedIntegerType()) 13560 return DiagnoseImpCast(S, E, T, CC, 13561 diag::warn_impcast_high_order_zero_bits); 13562 if (Target->isSignedIntegerType()) 13563 return DiagnoseImpCast(S, E, T, CC, 13564 diag::warn_impcast_nonnegative_result); 13565 } 13566 } 13567 13568 if (TargetRange.Width == LikelySourceRange.Width && 13569 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13570 Source->isSignedIntegerType()) { 13571 // Warn when doing a signed to signed conversion, warn if the positive 13572 // source value is exactly the width of the target type, which will 13573 // cause a negative value to be stored. 13574 13575 Expr::EvalResult Result; 13576 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13577 !S.SourceMgr.isInSystemMacro(CC)) { 13578 llvm::APSInt Value = Result.Val.getInt(); 13579 if (isSameWidthConstantConversion(S, E, T, CC)) { 13580 std::string PrettySourceValue = toString(Value, 10); 13581 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13582 13583 S.DiagRuntimeBehavior( 13584 E->getExprLoc(), E, 13585 S.PDiag(diag::warn_impcast_integer_precision_constant) 13586 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13587 << E->getSourceRange() << SourceRange(CC)); 13588 return; 13589 } 13590 } 13591 13592 // Fall through for non-constants to give a sign conversion warning. 
13593 } 13594 13595 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13596 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13597 LikelySourceRange.Width == TargetRange.Width)) { 13598 if (S.SourceMgr.isInSystemMacro(CC)) 13599 return; 13600 13601 unsigned DiagID = diag::warn_impcast_integer_sign; 13602 13603 // Traditionally, gcc has warned about this under -Wsign-compare. 13604 // We also want to warn about it in -Wconversion. 13605 // So if -Wconversion is off, use a completely identical diagnostic 13606 // in the sign-compare group. 13607 // The conditional-checking code will 13608 if (ICContext) { 13609 DiagID = diag::warn_impcast_integer_sign_conditional; 13610 *ICContext = true; 13611 } 13612 13613 return DiagnoseImpCast(S, E, T, CC, DiagID); 13614 } 13615 13616 // Diagnose conversions between different enumeration types. 13617 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13618 // type, to give us better diagnostics. 13619 QualType SourceType = E->getType(); 13620 if (!S.getLangOpts().CPlusPlus) { 13621 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13622 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13623 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13624 SourceType = S.Context.getTypeDeclType(Enum); 13625 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13626 } 13627 } 13628 13629 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13630 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13631 if (SourceEnum->getDecl()->hasNameForLinkage() && 13632 TargetEnum->getDecl()->hasNameForLinkage() && 13633 SourceEnum != TargetEnum) { 13634 if (S.SourceMgr.isInSystemMacro(CC)) 13635 return; 13636 13637 return DiagnoseImpCast(S, E, SourceType, T, CC, 13638 diag::warn_impcast_different_enum_types); 13639 } 13640 } 13641 13642 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13643 SourceLocation CC, QualType 
T); 13644 13645 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13646 SourceLocation CC, bool &ICContext) { 13647 E = E->IgnoreParenImpCasts(); 13648 13649 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13650 return CheckConditionalOperator(S, CO, CC, T); 13651 13652 AnalyzeImplicitConversions(S, E, CC); 13653 if (E->getType() != T) 13654 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13655 } 13656 13657 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13658 SourceLocation CC, QualType T) { 13659 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13660 13661 Expr *TrueExpr = E->getTrueExpr(); 13662 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13663 TrueExpr = BCO->getCommon(); 13664 13665 bool Suspicious = false; 13666 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13667 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13668 13669 if (T->isBooleanType()) 13670 DiagnoseIntInBoolContext(S, E); 13671 13672 // If -Wconversion would have warned about either of the candidates 13673 // for a signedness conversion to the context type... 13674 if (!Suspicious) return; 13675 13676 // ...but it's currently ignored... 13677 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13678 return; 13679 13680 // ...then check whether it would have warned about either of the 13681 // candidates for a signedness conversion to the condition type. 13682 if (E->getType() == T) return; 13683 13684 Suspicious = false; 13685 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13686 E->getType(), CC, &Suspicious); 13687 if (!Suspicious) 13688 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13689 E->getType(), CC, &Suspicious); 13690 } 13691 13692 /// Check conversion of given expression to boolean. 13693 /// Input argument E is a logical expression. 
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  // If the dialect has a real 'bool'/'_Bool' type, the normal conversion
  // checking already applies; this helper only covers "used as a boolean"
  // contexts in dialects without one.
  if (S.getLangOpts().Bool)
    return;
  // Atomic operands are handled by separate atomic-related diagnostics.
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}

namespace {
/// One pending expression to analyze in the data-recursive
/// AnalyzeImplicitConversions worklist.
struct AnalyzeImplicitConversionsWorkItem {
  Expr *E;            // The (sub)expression to analyze.
  SourceLocation CC;  // "Context" location of the conversion being checked.
  bool IsListInit;    // True if we are inside a C++ list-initialization.
};
}

/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
/// that should be visited are added to WorkList.
///
/// \param Item the expression to analyze, together with the conversion
///        context location and list-init flag inherited from its parent.
/// \param WorkList receives the child expressions that still need analysis;
///        the caller drains this list, keeping stack usage bounded.
static void AnalyzeImplicitConversions(
    Sema &S, AnalyzeImplicitConversionsWorkItem Item,
    llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
  Expr *OrigE = Item.E;
  SourceLocation CC = Item.CC;

  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Propagate whether we are in a C++ list initialization expression.
  // If so, we do not issue warnings for implicit int-float conversion
  // precision loss, because C++11 narrowing already handles it.
  bool IsListInit = Item.IsListInit ||
                    (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);

  // Nothing to diagnose for dependent expressions; they are re-analyzed
  // after instantiation.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  Expr *SourceExpr = E;
  // Examine, but don't traverse into the source expression of an
  // OpaqueValueExpr, since it may have multiple parents and we don't want to
  // emit duplicate diagnostics. It's fine to examine the form or attempt to
  // evaluate it in the context of checking the specific conversion to T though.
  if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    if (auto *Src = OVE->getSourceExpr())
      SourceExpr = Src;

  // Diagnose '~b' where b is known to be boolean: the user almost certainly
  // meant '!b'. Suggest the replacement as a fix-it.
  if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
    if (UO->getOpcode() == UO_Not &&
        UO->getSubExpr()->isKnownToHaveBooleanValue())
      S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
          << OrigE->getSourceRange() << T->isBooleanType()
          << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");

  // Diagnose 'a & b' / 'a | b' on boolean operands with side effects on both
  // sides; the author likely wanted the short-circuiting '&&' / '||'.
  if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
    if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
        BO->getLHS()->isKnownToHaveBooleanValue() &&
        BO->getRHS()->isKnownToHaveBooleanValue() &&
        BO->getLHS()->HasSideEffects(S.Context) &&
        BO->getRHS()->HasSideEffects(S.Context)) {
      S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
          << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
          << FixItHint::CreateReplacement(
                 BO->getOperatorLoc(),
                 (BO->getOpcode() == BO_And ? "&&" : "||"));
      S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
    }

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (SourceExpr->getType() != T)
    CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    // A non-void explicit cast of an atomic operand implies a seq_cst load.
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    WorkList.push_back({E, CC, IsListInit});
    return;
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below. Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children. From here on the
  // conversion context is this expression's own location.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    WorkList.push_back({ChildExpr, CC, IsListInit});
  }

  // For '&&' / '||', also check each operand's conversion to boolean
  // (string-literal operands of '&&' are skipped, as above).
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      // '!x' uses its operand as a boolean.
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      // Any other unary operator on an atomic operand (except taking its
      // address) implies a seq_cst load.
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression.  There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit/*= false*/) {
  // Iterative driver for the data-recursive worklist variant above; each
  // visit may push further subexpressions onto WorkList.
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
/// Returns true (and emits an error) if \p E is not of integer type;
/// otherwise runs the usual implicit-conversion checks against \p IntT.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a
// reference (a DeclRefExpr/MemberExpr of reference type, or a call that
// returns a reference); in that case the diagnostic \p PD has been emitted.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
                              const PartialDiagnostic &PD) {
  E = E->IgnoreParenImpCasts();

  const FunctionDecl *FD = nullptr;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (!DRE->getDecl()->getType()->isReferenceType())
      return false;
  } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    if (!M->getMemberDecl()->getType()->isReferenceType())
      return false;
  } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
    if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
      return false;
    FD = Call->getDirectCallee();
  } else {
    return false;
  }

  SemaRef.Diag(E->getExprLoc(), PD);

  // If possible, point to location of function.
  if (FD) {
    SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
  }

  return true;
}

// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not in a macro
// expansion, or is expanded from a top-level macro argument.
static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
  if (Loc.isInvalid())
    return false;

  // Walk up the chain of macro expansions; argument expansions are
  // followed through to their caller.
  while (Loc.isMacroID()) {
    if (SM.isMacroBodyExpansion(Loc))
      return true;
    Loc = SM.getImmediateMacroCallerLoc(Loc);
  }

  return false;
}

/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' is never null (in well-formed code).
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  // Strip a top-level '&'; anything else unary is not handled here.
  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // '&ref' can never be null; CheckForReference emits the diagnostic.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Shared diagnostic emitter for the nonnull-attribute cases below; also
  // notes the location of the attribute itself.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
      << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl.  Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute: don't warn if the
  // parameter was reassigned somewhere in the current function.
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      // Attribute directly on the parameter.
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // Otherwise look for a function-level nonnull attribute naming this
      // parameter (or naming no parameters, which means all of them).
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  // The remaining fix-its apply only to the function-decay case.
  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here.  If there is a null constant, only suggest
    // for a pointer return type.  If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression).  Implements -Wconversion
/// and -Wsign-compare.
14125 /// 14126 /// \param CC the "context" location of the implicit conversion, i.e. 14127 /// the most location of the syntactic entity requiring the implicit 14128 /// conversion 14129 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 14130 // Don't diagnose in unevaluated contexts. 14131 if (isUnevaluatedContext()) 14132 return; 14133 14134 // Don't diagnose for value- or type-dependent expressions. 14135 if (E->isTypeDependent() || E->isValueDependent()) 14136 return; 14137 14138 // Check for array bounds violations in cases where the check isn't triggered 14139 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 14140 // ArraySubscriptExpr is on the RHS of a variable initialization. 14141 CheckArrayAccess(E); 14142 14143 // This is not the right CC for (e.g.) a variable initialization. 14144 AnalyzeImplicitConversions(*this, E, CC); 14145 } 14146 14147 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 14148 /// Input argument E is a logical expression. 14149 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 14150 ::CheckBoolLikeConversion(*this, E, CC); 14151 } 14152 14153 /// Diagnose when expression is an integer constant expression and its evaluation 14154 /// results in integer overflow 14155 void Sema::CheckForIntOverflow (Expr *E) { 14156 // Use a work list to deal with nested struct initializers. 
14157 SmallVector<Expr *, 2> Exprs(1, E); 14158 14159 do { 14160 Expr *OriginalE = Exprs.pop_back_val(); 14161 Expr *E = OriginalE->IgnoreParenCasts(); 14162 14163 if (isa<BinaryOperator>(E)) { 14164 E->EvaluateForOverflow(Context); 14165 continue; 14166 } 14167 14168 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 14169 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 14170 else if (isa<ObjCBoxedExpr>(OriginalE)) 14171 E->EvaluateForOverflow(Context); 14172 else if (auto Call = dyn_cast<CallExpr>(E)) 14173 Exprs.append(Call->arg_begin(), Call->arg_end()); 14174 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 14175 Exprs.append(Message->arg_begin(), Message->arg_end()); 14176 } while (!Exprs.empty()); 14177 } 14178 14179 namespace { 14180 14181 /// Visitor for expressions which looks for unsequenced operations on the 14182 /// same object. 14183 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 14184 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 14185 14186 /// A tree of sequenced regions within an expression. Two regions are 14187 /// unsequenced if one is an ancestor or a descendent of the other. When we 14188 /// finish processing an expression with sequencing, such as a comma 14189 /// expression, we fold its tree nodes into its parent, since they are 14190 /// unsequenced with respect to nodes we will visit later. 14191 class SequenceTree { 14192 struct Value { 14193 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 14194 unsigned Parent : 31; 14195 unsigned Merged : 1; 14196 }; 14197 SmallVector<Value, 8> Values; 14198 14199 public: 14200 /// A region within an expression which may be sequenced with respect 14201 /// to some other region. 
14202 class Seq { 14203 friend class SequenceTree; 14204 14205 unsigned Index; 14206 14207 explicit Seq(unsigned N) : Index(N) {} 14208 14209 public: 14210 Seq() : Index(0) {} 14211 }; 14212 14213 SequenceTree() { Values.push_back(Value(0)); } 14214 Seq root() const { return Seq(0); } 14215 14216 /// Create a new sequence of operations, which is an unsequenced 14217 /// subset of \p Parent. This sequence of operations is sequenced with 14218 /// respect to other children of \p Parent. 14219 Seq allocate(Seq Parent) { 14220 Values.push_back(Value(Parent.Index)); 14221 return Seq(Values.size() - 1); 14222 } 14223 14224 /// Merge a sequence of operations into its parent. 14225 void merge(Seq S) { 14226 Values[S.Index].Merged = true; 14227 } 14228 14229 /// Determine whether two operations are unsequenced. This operation 14230 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14231 /// should have been merged into its parent as appropriate. 14232 bool isUnsequenced(Seq Cur, Seq Old) { 14233 unsigned C = representative(Cur.Index); 14234 unsigned Target = representative(Old.Index); 14235 while (C >= Target) { 14236 if (C == Target) 14237 return true; 14238 C = Values[C].Parent; 14239 } 14240 return false; 14241 } 14242 14243 private: 14244 /// Pick a representative for a sequence. 14245 unsigned representative(unsigned K) { 14246 if (Values[K].Merged) 14247 // Perform path compression as we go. 14248 return Values[K].Parent = representative(Values[K].Parent); 14249 return K; 14250 } 14251 }; 14252 14253 /// An object for which we can track unsequenced uses. 14254 using Object = const NamedDecl *; 14255 14256 /// Different flavors of object usage which we track. We only track the 14257 /// least-sequenced usage of each kind. 14258 enum UsageKind { 14259 /// A read of an object. Multiple unsequenced reads are OK. 
14260 UK_Use, 14261 14262 /// A modification of an object which is sequenced before the value 14263 /// computation of the expression, such as ++n in C++. 14264 UK_ModAsValue, 14265 14266 /// A modification of an object which is not sequenced before the value 14267 /// computation of the expression, such as n++. 14268 UK_ModAsSideEffect, 14269 14270 UK_Count = UK_ModAsSideEffect + 1 14271 }; 14272 14273 /// Bundle together a sequencing region and the expression corresponding 14274 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 14275 struct Usage { 14276 const Expr *UsageExpr; 14277 SequenceTree::Seq Seq; 14278 14279 Usage() : UsageExpr(nullptr) {} 14280 }; 14281 14282 struct UsageInfo { 14283 Usage Uses[UK_Count]; 14284 14285 /// Have we issued a diagnostic for this object already? 14286 bool Diagnosed; 14287 14288 UsageInfo() : Diagnosed(false) {} 14289 }; 14290 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 14291 14292 Sema &SemaRef; 14293 14294 /// Sequenced regions within the expression. 14295 SequenceTree Tree; 14296 14297 /// Declaration modifications and references which we have seen. 14298 UsageInfoMap UsageMap; 14299 14300 /// The region we are currently within. 14301 SequenceTree::Seq Region; 14302 14303 /// Filled in with declarations which were modified as a side-effect 14304 /// (that is, post-increment operations). 14305 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 14306 14307 /// Expressions to check later. We defer checking these to reduce 14308 /// stack usage. 14309 SmallVectorImpl<const Expr *> &WorkList; 14310 14311 /// RAII object wrapping the visitation of a sequenced subexpression of an 14312 /// expression. At the end of this process, the side-effects of the evaluation 14313 /// become sequenced with respect to the value computation of the result, so 14314 /// we downgrade any UK_ModAsSideEffect within the evaluation to 14315 /// UK_ModAsValue. 
  struct SequencedSubexpression {
    // On entry, redirect recording of side-effect modifications into our
    // local ModAsSideEffect list (saving the enclosing scope's list).
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      // Iterate in reverse so that when the same object was saved more than
      // once, the oldest saved usage is the one that ends up restored.
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    // Trackers form a stack via Prev; the innermost tracker is installed in
    // SequenceChecker::EvalTracker while alive.
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      Self.EvalTracker = Prev;
      // Propagate evaluation failure outward: once any nested subexpression
      // was found non-constant, every enclosing tracker gives up too.
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    /// Try to evaluate \p E as a boolean condition, storing the value in
    /// \p Result. Returns false (and latches EvalOK to false) if evaluation
    /// fails; never attempts evaluation of value-dependent expressions or
    /// after a previous failure.
    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr;

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      // Only pre-increment/decrement map through to their operand here; the
      // result of ++x/--x is the object x itself.
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      // The value of a comma expression is its right operand; the value of an
      // assignment is its left operand.
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    // Only overwrite the recorded usage if there was none, or if the old one
    // is sequenced with respect to the current region (we keep the
    // least-sequenced usage of each kind).
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    // Emit at most one diagnostic per object.
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    // The diagnostic is anchored on the modification; if the prior usage was
    // the use (OtherKind == UK_Use), the current expression is the
    // modification, so swap the two.
    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modify an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called. These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operation sequences modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which record usages which are modifications as side effect, and then
  // downgrade them (or more accurately restore the previous usage which was a
  // modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  // The entire traversal of \p E is performed by the constructor.
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  void VisitCastExpr(const CastExpr *E) {
    // Only an lvalue-to-rvalue conversion constitutes a read of an object;
    // other casts are visited without noting a use.
    Object O = Object();
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  /// Visit \p SequencedBefore fully sequenced before \p SequencedAfter: each
  /// gets its own region, and the side effects of the first are downgraded to
  /// value modifications before the second is visited.
  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //  Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //  the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //  The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    // Pre-C++17, the two operands share the current region (no sequencing
    // between them); in C++17 each operand gets its own region.
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //  [...] the assignment is sequenced after the value computation
    //  of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //  [...] The right operand is sequenced before the left operand. [...]
      // The RHS is a sequenced subexpression: its side effects are complete
      // (downgraded to value modifications) before the LHS is visited.
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      // A compound assignment also reads the LHS.
      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //  the assignment is sequenced [...] before the value computation of the
    //  assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    // In C, the modification is not sequenced before the value computation.
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // Post-increment/decrement is always a modification as side effect.
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //  [...] the second operand is not evaluated if the first operand
    //  evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    // Visit the RHS unless the LHS provably evaluates to true.
    // NOTE(review): "(EvalOK && !EvalResult)" simplifies to "!EvalResult"
    // under the short-circuit; kept as written for clarity of intent.
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //  [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    // Visit the RHS unless the LHS provably evaluates to false.
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //  [...] Every value computation and side effect associated with the first
    //  expression is sequenced before every value computation and side effect
    //  associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visition
    // is done. (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y".
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //  [...] The first expression is contextually converted to bool (Clause 4).
    //  It is evaluated and if it is true, the result of the conditional
    //  expression is the value of the second expression, otherwise that of the
    //  third expression. Only one of the second and third expressions is
    //  evaluated. [...]
    // If the condition cannot be evaluated, both arms are visited (in
    // mutually-sequenced regions, per the comment above).
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.

    // Unevaluated builtins (e.g. __builtin_constant_p) do not evaluate their
    // arguments, so there is nothing to check.
    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      // Pre-C++17, the callee and the arguments share the current region.
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first. The two branches differ only in
      // that C++17 treats the callee as a sequenced subexpression.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      /// No restrictions beyond the ordinary call rules.
      NoSequencing,
      /// Left operand sequenced before the right (e.g. <<, &&, comma).
      LHSBeforeRHS,
      /// Right operand sequenced before the left (assignment operators).
      RHSBeforeLHS,
      /// Postfix-expression sequenced before all arguments (operator()).
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    // Give each initializer its own child region of Parent so they are
    // mutually sequenced, then merge them all back afterwards.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
    // As in VisitCXXConstructExpr: one child region per initializer so they
    // are mutually sequenced, merged back when done.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
      const Expr *E = ILE->getInit(I);
      if (!E)
        continue;
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(E);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }
};

} // namespace

void Sema::CheckUnsequencedOperations(const Expr *E) {
  // The SequenceChecker constructor performs the entire traversal of each
  // work-list item; additional expressions may be deferred onto WorkList to
  // bound recursion depth.
  SmallVector<const Expr *, 8> WorkList;
  WorkList.push_back(E);
  while (!WorkList.empty()) {
    const Expr *Item = WorkList.pop_back_val();
    SequenceChecker(*this, Item, WorkList);
  }
}

void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  // Treat the whole check as constant-evaluated when requested or when the
  // expression is already a ConstantExpr.
  llvm::SaveAndRestore<bool> ConstantContext(
      isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E));
  CheckImplicitConversions(E, CheckLoc);
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}

void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
                                       FieldDecl *BitField,
                                       Expr *Init) {
  // Only run for the diagnostics; the analysis result itself is unused here.
  (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
}

// Recursively diagnose any use of the [*] array size modifier inside a
// parameter type of a function definition (only valid in declarations).
static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
                                         SourceLocation Loc) {
  // [*] can only occur inside a variably modified type.
  if (!PType->isVariablyModifiedType())
    return;
  // Strip one level of pointer/reference/paren sugar and recurse.
  if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
    diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
    diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ParenTy =
dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  // Not a [*] array at this level; keep looking in the element type.
  if (AT->getSizeModifier() != ArrayType::Star) {
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // This is also C++ [dcl.fct]p6.
    if (!Param->isInvalidDecl() &&
        RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type)) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C2x)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
    }

    // C99 6.7.5.3p12:
    //   If the function declarator is not part of a definition of that
    //   function, parameters may have incomplete type and may use the [*]
    //   notation in their sequences of declarator specifiers to specify
    //   variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }
  }

  return HasInvalidParm;
}

// Forward declaration; presumably defined later in this file (not visible
// here). Note the unusual-but-valid placement of 'static' after the return
// type.
Optional<std::pair<CharUnits, CharUnits>>
static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);

/// Compute the alignment and offset of the base class object given the
/// derived-to-base cast expression and the alignment and offset of the derived
/// class object.
static std::pair<CharUnits, CharUnits>
getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
                                   CharUnits BaseAlignment, CharUnits Offset,
                                   ASTContext &Ctx) {
  // Walk each step of the derived-to-base path, updating the running
  // (alignment, offset) pair.
  for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
       ++PathI) {
    const CXXBaseSpecifier *Base = *PathI;
    const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
    if (Base->isVirtual()) {
      // The complete object may have a lower alignment than the non-virtual
      // alignment of the base, in which case the base may be misaligned. Choose
      // the smaller of the non-virtual alignment and BaseAlignment, which is a
      // conservative lower bound of the complete object alignment.
      CharUnits NonVirtualAlignment =
          Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
      BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment);
      // The offset within the complete object is unknown for a virtual base.
      Offset = CharUnits::Zero();
    } else {
      const ASTRecordLayout &RL =
          Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
      Offset += RL.getBaseClassOffset(BaseDecl);
    }
    DerivedType = Base->getType();
  }

  return std::make_pair(BaseAlignment, Offset);
}

/// Compute the alignment and offset of a binary additive operator.
static Optional<std::pair<CharUnits, CharUnits>>
getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
                                     bool IsSub, ASTContext &Ctx) {
  QualType PointeeType = PtrE->getType()->getPointeeType();

  // Pointer arithmetic on a variably-sized element type can't be folded.
  if (!PointeeType->isConstantSizeType())
    return llvm::None;

  auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);

  if (!P)
    return llvm::None;

  CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
  if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
    // Constant index: fold the exact byte offset into the result.
    CharUnits Offset = EltSize * IdxRes->getExtValue();
    if (IsSub)
      Offset = -Offset;
    return std::make_pair(P->first, P->second + Offset);
  }

  // If the integer expression isn't a constant expression, compute the lower
  // bound of the alignment using the alignment and offset of the pointer
  // expression and the element size.
  return std::make_pair(
      P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize),
      CharUnits::Zero());
}

/// This helper function takes an lvalue expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
15234 Optional<std::pair<CharUnits, CharUnits>> 15235 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 15236 E = E->IgnoreParens(); 15237 switch (E->getStmtClass()) { 15238 default: 15239 break; 15240 case Stmt::CStyleCastExprClass: 15241 case Stmt::CXXStaticCastExprClass: 15242 case Stmt::ImplicitCastExprClass: { 15243 auto *CE = cast<CastExpr>(E); 15244 const Expr *From = CE->getSubExpr(); 15245 switch (CE->getCastKind()) { 15246 default: 15247 break; 15248 case CK_NoOp: 15249 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15250 case CK_UncheckedDerivedToBase: 15251 case CK_DerivedToBase: { 15252 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15253 if (!P) 15254 break; 15255 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15256 P->second, Ctx); 15257 } 15258 } 15259 break; 15260 } 15261 case Stmt::ArraySubscriptExprClass: { 15262 auto *ASE = cast<ArraySubscriptExpr>(E); 15263 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15264 false, Ctx); 15265 } 15266 case Stmt::DeclRefExprClass: { 15267 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 15268 // FIXME: If VD is captured by copy or is an escaping __block variable, 15269 // use the alignment of VD's type. 
15270 if (!VD->getType()->isReferenceType()) 15271 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 15272 if (VD->hasInit()) 15273 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 15274 } 15275 break; 15276 } 15277 case Stmt::MemberExprClass: { 15278 auto *ME = cast<MemberExpr>(E); 15279 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 15280 if (!FD || FD->getType()->isReferenceType() || 15281 FD->getParent()->isInvalidDecl()) 15282 break; 15283 Optional<std::pair<CharUnits, CharUnits>> P; 15284 if (ME->isArrow()) 15285 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 15286 else 15287 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 15288 if (!P) 15289 break; 15290 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 15291 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 15292 return std::make_pair(P->first, 15293 P->second + CharUnits::fromQuantity(Offset)); 15294 } 15295 case Stmt::UnaryOperatorClass: { 15296 auto *UO = cast<UnaryOperator>(E); 15297 switch (UO->getOpcode()) { 15298 default: 15299 break; 15300 case UO_Deref: 15301 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 15302 } 15303 break; 15304 } 15305 case Stmt::BinaryOperatorClass: { 15306 auto *BO = cast<BinaryOperator>(E); 15307 auto Opcode = BO->getOpcode(); 15308 switch (Opcode) { 15309 default: 15310 break; 15311 case BO_Comma: 15312 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 15313 } 15314 break; 15315 } 15316 } 15317 return llvm::None; 15318 } 15319 15320 /// This helper function takes a pointer expression and returns the alignment of 15321 /// a VarDecl and a constant offset from the VarDecl. 
Optional<std::pair<CharUnits, CharUnits>>
static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      // A no-op cast does not change the pointer; look through it.
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      // The decayed pointer has the alignment of the array lvalue.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' is only guaranteed to be aligned to the class's non-virtual
    // alignment (the most-derived object may be a virtual base elsewhere).
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    if (UO->getOpcode() == UO_AddrOf)
      // &L: switch back to the lvalue analysis of the operand.
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      // For 'int + ptr', canonicalize so the pointer is on the left.
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      // (A, B): only the right-hand side produces the pointer value.
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return llvm::None;
}

static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
  // See if we can compute the alignment of a VarDecl and an offset from it.
  Optional<std::pair<CharUnits, CharUnits>> P =
      getBaseAlignmentAndOffsetFromPtr(E, S.Context);

  if (P)
    return P->first.alignmentAtOffset(P->second);

  // If that failed, return the type's alignment.
  return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}

/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*.  We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  // Only warn when the cast strictly increases the required alignment.
  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
      << Op->getType() << T
      << static_cast<unsigned>(SrcAlign.getQuantity())
      << static_cast<unsigned>(DestAlign.getQuantity())
      << TRange << Op->getSourceRange();
}

/// Check whether this array fits the idiom of a size-one tail padded
/// array member of a struct.
///
/// We avoid emitting out-of-bounds access warnings for such arrays as they are
/// commonly used to emulate flexible arrays in C89 code.
static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
                                    const NamedDecl *ND) {
  if (Size != 1 || !ND) return false;

  const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
  if (!FD) return false;

  // Don't consider sizes resulting from macro expansions or template argument
  // substitution to form C89 tail-padded arrays.

  TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
  while (TInfo) {
    TypeLoc TL = TInfo->getTypeLoc();
    // Look through typedefs.
    if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
      const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
      TInfo = TDL->getTypeSourceInfo();
      continue;
    }
    if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
      // Only a literal '1' written directly in source (not via a macro)
      // counts as the C89 tail-padding idiom.
      const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
      if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
        return false;
    }
    break;
  }

  const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
  if (!RD) return false;
  if (RD->isUnion()) return false;
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CRD->isStandardLayout()) return false;
  }

  // See if this is the last field decl in the record.
  const Decl *D = FD;
  while ((D = D->getNextDeclInContext()))
    if (isa<FieldDecl>(D))
      return false;
  return true;
}

/// Diagnose out-of-bounds constant array indexing and pointer arithmetic
/// (-Warray-bounds and related diagnostics).
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluated())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  // BaseType is null when the base is not a constant-size array (e.g. plain
  // pointer arithmetic); that case is handled by the unbounded path below.
  const Type *BaseType =
      ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
  bool IsUnboundedArray = (BaseType == nullptr);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  // Only constant indexes can be checked.
  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    index.setIsUnsigned(false);
    index = -index;
  }

  // Remember the declaration the base names, for the note.
  const NamedDecl *ND = nullptr;
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      const auto &ASTC = getASTContext();
      unsigned AddrBits =
          ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace(
              EffectiveType->getCanonicalTypeInternal()));
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      Optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits)
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about.  Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits)
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      if (!ND) {
        // Try harder to find a NamedDecl to point at in the note.
        while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
          BaseExpr = ASE->getBase()->IgnoreParenCasts();
        if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
          ND = DRE->getDecl();
        if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
          ND = ME->getMemberDecl();
      }

      if (ND)
        DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                            PDiag(diag::note_array_declared_here) << ND);
    }
    return;
  }

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();
    if (!size.isStrictlyPositive())
      return;

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to size
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);
      // Handle ptrarith_typesize being zero, such as when casting to void*
      if (!ptrarith_typesize) ptrarith_typesize = 1;
      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved
        uint64_t ratio = array_typesize / ptrarith_typesize;
        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Widen whichever of index/size is narrower so they compare correctly.
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Also don't warn for arrays of size 1 which are members of some
    // structure. These are often used to approximate flexible arrays in C89
    // code.
    if (IsTailPaddedMemberArray(*this, size, ND))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
                          : diag::warn_ptr_arith_exceeds_bounds;

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << toString(size, 10, true)
                                      << (unsigned)size.getLimitedValue(~0U)
                                      << IndexExpr->getSourceRange());
  } else {
    // Negative index: always out of bounds.
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << IndexExpr->getSourceRange());
  }

  if (!ND) {
    // Try harder to find a NamedDecl to point at in the note.
    while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
      BaseExpr = ASE->getBase()->IgnoreParenCasts();
    if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
      ND = DRE->getDecl();
    if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
      ND = ME->getMemberDecl();
  }

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_declared_here) << ND);
}

/// Walk an expression tree and run the bounds check above on every array
/// subscript (and lower-bound of OpenMP array sections) found inside it.
void Sema::CheckArrayAccess(const Expr *expr) {
  // Tracks whether we are under an odd number of '&' operators, in which
  // case the one-past-the-end address is legal to form.
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
    case Stmt::ArraySubscriptExprClass: {
      const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
      CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                       AllowOnePastEnd > 0);
      expr = ASE->getBase();
      break;
    }
    case Stmt::MemberExprClass: {
      expr = cast<MemberExpr>(expr)->getBase();
      break;
    }
    case Stmt::OMPArraySectionExprClass: {
      const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
      if (ASE->getLowerBound())
        CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                         /*ASE=*/nullptr, AllowOnePastEnd > 0);
      return;
    }
    case Stmt::UnaryOperatorClass: {
      // Only unwrap the * and & unary operators
      const UnaryOperator *UO = cast<UnaryOperator>(expr);
      expr = UO->getSubExpr();
      switch (UO->getOpcode()) {
      case UO_AddrOf:
        AllowOnePastEnd++;
        break;
      case UO_Deref:
        AllowOnePastEnd--;
        break;
      default:
        return;
      }
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
      if (const Expr *lhs = cond->getLHS())
        CheckArrayAccess(lhs);
      if (const Expr *rhs = cond->getRHS())
        CheckArrayAccess(rhs);
      return;
    }
    case Stmt::CXXOperatorCallExprClass: {
      const auto *OCE = cast<CXXOperatorCallExpr>(expr);
      for (const auto *Arg : OCE->arguments())
        CheckArrayAccess(Arg);
      return;
    }
    default:
      return;
    }
  }
}

//===--- CHECK: Objective-C retain cycles ----------------------------------//

namespace {

/// Describes a variable that strongly owns the receiver of a message send,
/// used to report a block capturing that variable as a retain cycle.
struct RetainCycleOwner {
  VarDecl *Variable = nullptr;
  SourceRange Range;
  SourceLocation Loc;
  // True when ownership is indirect (through an ivar or property).
  bool Indirect = false;

  RetainCycleOwner() = default;

  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace

/// Consider whether capturing the given variable can possibly lead to
/// a retain cycle.
static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
  // In ARC, it's captured strongly iff the variable has __strong
  // lifetime.  In MRR, it's captured strongly if the variable is
  // __block and has an appropriate type.
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}

/// Walk down from 'e' to find a variable that strongly owns it; fills in
/// 'owner' and returns true on success.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        // These casts preserve the underlying object; keep walking down.
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      // Only retaining properties (or properties backed by a __strong ivar)
      // can participate in a retain cycle.
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
          return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}

namespace {

/// Searches a block body for an expression that captures 'Variable',
/// recording the capturing expression and whether the variable is later
/// reassigned to nil/zero (which breaks the cycle).
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
  ASTContext &Context;
  VarDecl *Variable;
  Expr *Capturer = nullptr;
  bool VarWillBeReased = false;

  FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
      : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
        Context(Context), Variable(variable) {}

  void VisitDeclRefExpr(DeclRefExpr *ref) {
    if (ref->getDecl() == Variable && !Capturer)
      Capturer = ref;
  }

  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
    if (Capturer) return;
    Visit(ref->getBase());
    if (Capturer && ref->isFreeIvar())
      Capturer = ref;
  }

  void VisitBlockExpr(BlockExpr *block) {
    // Look inside nested blocks
    if (block->getBlockDecl()->capturesVariable(Variable))
      Visit(block->getBlockDecl()->getBody());
  }

  void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
    if (Capturer) return;
    if (OVE->getSourceExpr())
      Visit(OVE->getSourceExpr());
  }

  void VisitBinaryOperator(BinaryOperator *BinOp) {
    // An assignment of 0/nil to the variable inside the block means the
    // cycle will be broken, so suppress the warning in that case.
    if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
      return;
    Expr *LHS = BinOp->getLHS();
    if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
      if (DRE->getDecl() != Variable)
        return;
      if (Expr *RHS = BinOp->getRHS()) {
        RHS = RHS->IgnoreParenCasts();
        Optional<llvm::APSInt> Value;
        VarWillBeReased =
            (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
             *Value == 0);
      }
    }
  }
};

} // namespace

/// Check whether the given argument is a block which captures a
/// variable.
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}

/// Emit the retain-cycle warning plus a note pointing at the owner.
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
    << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
    << owner.Indirect << owner.Range;
}

/// Check for a keyword selector that starts with the word 'add' or
/// 'set'.
static bool isSetterLikeSelector(Selector sel) {
  if (sel.isUnarySelector()) return false;

  StringRef str = sel.getNameForSlot(0);
  // Ignore any leading underscores (private-method naming convention).
  while (!str.empty() && str.front() == '_') str = str.substr(1);
  if (str.startswith("set"))
    str = str.substr(3);
  else if (str.startswith("add")) {
    // Specially allow 'addOperationWithBlock:'.
    if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
      return false;
    str = str.substr(3);
  }
  else
    return false;

  // Require a word boundary after the prefix: 'setFoo' matches, 'settle'
  // does not.
  if (str.empty()) return true;
  return !isLowercase(str.front());
}

/// If 'Message' inserts an object into an NSMutableArray, return the index
/// of the inserted-object argument; otherwise None.
static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
                                                    ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
                                                Message->getReceiverInterface(),
                                                NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSArrayMethodKind> MKOpt =
      S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableArr_addObject:
    case NSAPI::NSMutableArr_insertObjectAtIndex:
    case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
      return 0;
    case NSAPI::NSMutableArr_replaceObjectAtIndex:
      return 1;

    default:
      return None;
  }

  return None;
}

/// If 'Message' inserts an object into an NSMutableDictionary, return the
/// index of the inserted-object argument; otherwise None.
static
Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
                                                  ObjCMessageExpr *Message) {
  bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableDictionary);
  if (!IsMutableDictionary) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
      S.NSAPIObj->getNSDictionaryMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSDictionaryMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableDict_setObjectForKey:
    case NSAPI::NSMutableDict_setValueForKey:
    case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
      return 0;

    default:
      return None;
  }

  return None;
}

/// If 'Message' inserts an object into an NSMutableSet or
/// NSMutableOrderedSet, return the index of the inserted-object argument;
/// otherwise None.
static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableSet);

  bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableOrderedSet);
  if (!IsMutableSet && !IsMutableOrderedSet) {
    return None;
  }

  Selector Sel = Message->getSelector();

  Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
  if (!MKOpt) {
    return None;
  }

  NSAPI::NSSetMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableSet_addObject:
    case NSAPI::NSOrderedSet_setObjectAtIndex:
    case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
    case NSAPI::NSOrderedSet_insertObjectAtIndex:
      return 0;
    case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
      return 1;
  }

  return None;
}

/// Warn when an object is added to a mutable Foundation container that is
/// the object itself (e.g. [arr addObject:arr]), which creates a cycle the
/// collection will retain forever.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  Optional<int> ArgOpt;

  // Determine which argument (if any) is the object being inserted.
  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super addObject:self] and friends.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    // Warn only when receiver and argument name the same variable or the
    // same ivar.
    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}

/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    // A 'super' send is effectively owned by 'self'.
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}

/// Check a property assign to see if it's likely to cause a retain cycle.
16191 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16192 RetainCycleOwner owner; 16193 if (!findRetainCycleOwner(*this, receiver, owner)) 16194 return; 16195 16196 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16197 diagnoseRetainCycle(*this, capturer, owner); 16198 } 16199 16200 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16201 RetainCycleOwner Owner; 16202 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16203 return; 16204 16205 // Because we don't have an expression for the variable, we have to set the 16206 // location explicitly here. 16207 Owner.Loc = Var->getLocation(); 16208 Owner.Range = Var->getSourceRange(); 16209 16210 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16211 diagnoseRetainCycle(*this, Capturer, Owner); 16212 } 16213 16214 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16215 Expr *RHS, bool isProperty) { 16216 // Check if RHS is an Objective-C object literal, which also can get 16217 // immediately zapped in a weak reference. Note that we explicitly 16218 // allow ObjCStringLiterals, since those are designed to never really die. 16219 RHS = RHS->IgnoreParenImpCasts(); 16220 16221 // This enum needs to match with the 'select' in 16222 // warn_objc_arc_literal_assign (off-by-1). 16223 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16224 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16225 return false; 16226 16227 S.Diag(Loc, diag::warn_arc_literal_assign) 16228 << (unsigned) Kind 16229 << (isProperty ? 0 : 1) 16230 << RHS->getSourceRange(); 16231 16232 return true; 16233 } 16234 16235 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16236 Qualifiers::ObjCLifetime LT, 16237 Expr *RHS, bool isProperty) { 16238 // Strip off any implicit cast added to get to the one ARC-specific. 
16239 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16240 if (cast->getCastKind() == CK_ARCConsumeObject) { 16241 S.Diag(Loc, diag::warn_arc_retained_assign) 16242 << (LT == Qualifiers::OCL_ExplicitNone) 16243 << (isProperty ? 0 : 1) 16244 << RHS->getSourceRange(); 16245 return true; 16246 } 16247 RHS = cast->getSubExpr(); 16248 } 16249 16250 if (LT == Qualifiers::OCL_Weak && 16251 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16252 return true; 16253 16254 return false; 16255 } 16256 16257 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16258 QualType LHS, Expr *RHS) { 16259 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16260 16261 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16262 return false; 16263 16264 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16265 return true; 16266 16267 return false; 16268 } 16269 16270 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16271 Expr *LHS, Expr *RHS) { 16272 QualType LHSType; 16273 // PropertyRef on LHS type need be directly obtained from 16274 // its declaration as it has a PseudoType. 16275 ObjCPropertyRefExpr *PRE 16276 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16277 if (PRE && !PRE->isImplicitProperty()) { 16278 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16279 if (PD) 16280 LHSType = PD->getType(); 16281 } 16282 16283 if (LHSType.isNull()) 16284 LHSType = LHS->getType(); 16285 16286 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16287 16288 if (LT == Qualifiers::OCL_Weak) { 16289 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16290 getCurFunction()->markSafeWeakUse(LHS); 16291 } 16292 16293 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16294 return; 16295 16296 // FIXME. Check for other life times. 
16297 if (LT != Qualifiers::OCL_None) 16298 return; 16299 16300 if (PRE) { 16301 if (PRE->isImplicitProperty()) 16302 return; 16303 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16304 if (!PD) 16305 return; 16306 16307 unsigned Attributes = PD->getPropertyAttributes(); 16308 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16309 // when 'assign' attribute was not explicitly specified 16310 // by user, ignore it and rely on property type itself 16311 // for lifetime info. 16312 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 16313 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 16314 LHSType->isObjCRetainableType()) 16315 return; 16316 16317 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16318 if (cast->getCastKind() == CK_ARCConsumeObject) { 16319 Diag(Loc, diag::warn_arc_retained_property_assign) 16320 << RHS->getSourceRange(); 16321 return; 16322 } 16323 RHS = cast->getSubExpr(); 16324 } 16325 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 16326 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 16327 return; 16328 } 16329 } 16330 } 16331 16332 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 16333 16334 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 16335 SourceLocation StmtLoc, 16336 const NullStmt *Body) { 16337 // Do not warn if the body is a macro that expands to nothing, e.g: 16338 // 16339 // #define CALL(x) 16340 // if (condition) 16341 // CALL(0); 16342 if (Body->hasLeadingEmptyMacro()) 16343 return false; 16344 16345 // Get line numbers of statement and body. 
16346 bool StmtLineInvalid; 16347 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 16348 &StmtLineInvalid); 16349 if (StmtLineInvalid) 16350 return false; 16351 16352 bool BodyLineInvalid; 16353 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 16354 &BodyLineInvalid); 16355 if (BodyLineInvalid) 16356 return false; 16357 16358 // Warn if null statement and body are on the same line. 16359 if (StmtLine != BodyLine) 16360 return false; 16361 16362 return true; 16363 } 16364 16365 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 16366 const Stmt *Body, 16367 unsigned DiagID) { 16368 // Since this is a syntactic check, don't emit diagnostic for template 16369 // instantiations, this just adds noise. 16370 if (CurrentInstantiationScope) 16371 return; 16372 16373 // The body should be a null statement. 16374 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16375 if (!NBody) 16376 return; 16377 16378 // Do the usual checks. 16379 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16380 return; 16381 16382 Diag(NBody->getSemiLoc(), DiagID); 16383 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16384 } 16385 16386 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 16387 const Stmt *PossibleBody) { 16388 assert(!CurrentInstantiationScope); // Ensured by caller 16389 16390 SourceLocation StmtLoc; 16391 const Stmt *Body; 16392 unsigned DiagID; 16393 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 16394 StmtLoc = FS->getRParenLoc(); 16395 Body = FS->getBody(); 16396 DiagID = diag::warn_empty_for_body; 16397 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 16398 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 16399 Body = WS->getBody(); 16400 DiagID = diag::warn_empty_while_body; 16401 } else 16402 return; // Neither `for' nor `while'. 16403 16404 // The body should be a null statement. 
16405 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 16406 if (!NBody) 16407 return; 16408 16409 // Skip expensive checks if diagnostic is disabled. 16410 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 16411 return; 16412 16413 // Do the usual checks. 16414 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 16415 return; 16416 16417 // `for(...);' and `while(...);' are popular idioms, so in order to keep 16418 // noise level low, emit diagnostics only if for/while is followed by a 16419 // CompoundStmt, e.g.: 16420 // for (int i = 0; i < n; i++); 16421 // { 16422 // a(i); 16423 // } 16424 // or if for/while is followed by a statement with more indentation 16425 // than for/while itself: 16426 // for (int i = 0; i < n; i++); 16427 // a(i); 16428 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 16429 if (!ProbableTypo) { 16430 bool BodyColInvalid; 16431 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 16432 PossibleBody->getBeginLoc(), &BodyColInvalid); 16433 if (BodyColInvalid) 16434 return; 16435 16436 bool StmtColInvalid; 16437 unsigned StmtCol = 16438 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 16439 if (StmtColInvalid) 16440 return; 16441 16442 if (BodyCol > StmtCol) 16443 ProbableTypo = true; 16444 } 16445 16446 if (ProbableTypo) { 16447 Diag(NBody->getSemiLoc(), DiagID); 16448 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 16449 } 16450 } 16451 16452 //===--- CHECK: Warn on self move with std::move. -------------------------===// 16453 16454 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 16455 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 16456 SourceLocation OpLoc) { 16457 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 16458 return; 16459 16460 if (inTemplateInstantiation()) 16461 return; 16462 16463 // Strip parens and casts away. 
16464 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 16465 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 16466 16467 // Check for a call expression 16468 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 16469 if (!CE || CE->getNumArgs() != 1) 16470 return; 16471 16472 // Check for a call to std::move 16473 if (!CE->isCallToStdMove()) 16474 return; 16475 16476 // Get argument from std::move 16477 RHSExpr = CE->getArg(0); 16478 16479 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 16480 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 16481 16482 // Two DeclRefExpr's, check that the decls are the same. 16483 if (LHSDeclRef && RHSDeclRef) { 16484 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16485 return; 16486 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16487 RHSDeclRef->getDecl()->getCanonicalDecl()) 16488 return; 16489 16490 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16491 << LHSExpr->getSourceRange() 16492 << RHSExpr->getSourceRange(); 16493 return; 16494 } 16495 16496 // Member variables require a different approach to check for self moves. 16497 // MemberExpr's are the same if every nested MemberExpr refers to the same 16498 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 16499 // the base Expr's are CXXThisExpr's. 
16500 const Expr *LHSBase = LHSExpr; 16501 const Expr *RHSBase = RHSExpr; 16502 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 16503 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 16504 if (!LHSME || !RHSME) 16505 return; 16506 16507 while (LHSME && RHSME) { 16508 if (LHSME->getMemberDecl()->getCanonicalDecl() != 16509 RHSME->getMemberDecl()->getCanonicalDecl()) 16510 return; 16511 16512 LHSBase = LHSME->getBase(); 16513 RHSBase = RHSME->getBase(); 16514 LHSME = dyn_cast<MemberExpr>(LHSBase); 16515 RHSME = dyn_cast<MemberExpr>(RHSBase); 16516 } 16517 16518 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16519 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16520 if (LHSDeclRef && RHSDeclRef) { 16521 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16522 return; 16523 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16524 RHSDeclRef->getDecl()->getCanonicalDecl()) 16525 return; 16526 16527 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16528 << LHSExpr->getSourceRange() 16529 << RHSExpr->getSourceRange(); 16530 return; 16531 } 16532 16533 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16534 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16535 << LHSExpr->getSourceRange() 16536 << RHSExpr->getSourceRange(); 16537 } 16538 16539 //===--- Layout compatibility ----------------------------------------------// 16540 16541 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16542 16543 /// Check if two enumeration types are layout-compatible. 16544 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16545 // C++11 [dcl.enum] p8: 16546 // Two enumeration types are layout-compatible if they have the same 16547 // underlying type. 16548 return ED1->isComplete() && ED2->isComplete() && 16549 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16550 } 16551 16552 /// Check if two fields are layout-compatible. 
16553 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16554 FieldDecl *Field2) { 16555 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16556 return false; 16557 16558 if (Field1->isBitField() != Field2->isBitField()) 16559 return false; 16560 16561 if (Field1->isBitField()) { 16562 // Make sure that the bit-fields are the same length. 16563 unsigned Bits1 = Field1->getBitWidthValue(C); 16564 unsigned Bits2 = Field2->getBitWidthValue(C); 16565 16566 if (Bits1 != Bits2) 16567 return false; 16568 } 16569 16570 return true; 16571 } 16572 16573 /// Check if two standard-layout structs are layout-compatible. 16574 /// (C++11 [class.mem] p17) 16575 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16576 RecordDecl *RD2) { 16577 // If both records are C++ classes, check that base classes match. 16578 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16579 // If one of records is a CXXRecordDecl we are in C++ mode, 16580 // thus the other one is a CXXRecordDecl, too. 16581 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16582 // Check number of base classes. 16583 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16584 return false; 16585 16586 // Check the base classes. 16587 for (CXXRecordDecl::base_class_const_iterator 16588 Base1 = D1CXX->bases_begin(), 16589 BaseEnd1 = D1CXX->bases_end(), 16590 Base2 = D2CXX->bases_begin(); 16591 Base1 != BaseEnd1; 16592 ++Base1, ++Base2) { 16593 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16594 return false; 16595 } 16596 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16597 // If only RD2 is a C++ class, it should have zero base classes. 16598 if (D2CXX->getNumBases() > 0) 16599 return false; 16600 } 16601 16602 // Check the fields. 
16603 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16604 Field2End = RD2->field_end(), 16605 Field1 = RD1->field_begin(), 16606 Field1End = RD1->field_end(); 16607 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16608 if (!isLayoutCompatible(C, *Field1, *Field2)) 16609 return false; 16610 } 16611 if (Field1 != Field1End || Field2 != Field2End) 16612 return false; 16613 16614 return true; 16615 } 16616 16617 /// Check if two standard-layout unions are layout-compatible. 16618 /// (C++11 [class.mem] p18) 16619 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16620 RecordDecl *RD2) { 16621 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16622 for (auto *Field2 : RD2->fields()) 16623 UnmatchedFields.insert(Field2); 16624 16625 for (auto *Field1 : RD1->fields()) { 16626 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16627 I = UnmatchedFields.begin(), 16628 E = UnmatchedFields.end(); 16629 16630 for ( ; I != E; ++I) { 16631 if (isLayoutCompatible(C, Field1, *I)) { 16632 bool Result = UnmatchedFields.erase(*I); 16633 (void) Result; 16634 assert(Result); 16635 break; 16636 } 16637 } 16638 if (I == E) 16639 return false; 16640 } 16641 16642 return UnmatchedFields.empty(); 16643 } 16644 16645 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16646 RecordDecl *RD2) { 16647 if (RD1->isUnion() != RD2->isUnion()) 16648 return false; 16649 16650 if (RD1->isUnion()) 16651 return isLayoutCompatibleUnion(C, RD1, RD2); 16652 else 16653 return isLayoutCompatibleStruct(C, RD1, RD2); 16654 } 16655 16656 /// Check if two types are layout-compatible in C++11 sense. 16657 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16658 if (T1.isNull() || T2.isNull()) 16659 return false; 16660 16661 // C++11 [basic.types] p11: 16662 // If two types T1 and T2 are the same type, then T1 and T2 are 16663 // layout-compatible types. 
16664 if (C.hasSameType(T1, T2)) 16665 return true; 16666 16667 T1 = T1.getCanonicalType().getUnqualifiedType(); 16668 T2 = T2.getCanonicalType().getUnqualifiedType(); 16669 16670 const Type::TypeClass TC1 = T1->getTypeClass(); 16671 const Type::TypeClass TC2 = T2->getTypeClass(); 16672 16673 if (TC1 != TC2) 16674 return false; 16675 16676 if (TC1 == Type::Enum) { 16677 return isLayoutCompatible(C, 16678 cast<EnumType>(T1)->getDecl(), 16679 cast<EnumType>(T2)->getDecl()); 16680 } else if (TC1 == Type::Record) { 16681 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16682 return false; 16683 16684 return isLayoutCompatible(C, 16685 cast<RecordType>(T1)->getDecl(), 16686 cast<RecordType>(T2)->getDecl()); 16687 } 16688 16689 return false; 16690 } 16691 16692 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16693 16694 /// Given a type tag expression find the type tag itself. 16695 /// 16696 /// \param TypeExpr Type tag expression, as it appears in user's code. 16697 /// 16698 /// \param VD Declaration of an identifier that appears in a type tag. 16699 /// 16700 /// \param MagicValue Type tag magic value. 16701 /// 16702 /// \param isConstantEvaluated whether the evalaution should be performed in 16703 16704 /// constant context. 
16705 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16706 const ValueDecl **VD, uint64_t *MagicValue, 16707 bool isConstantEvaluated) { 16708 while(true) { 16709 if (!TypeExpr) 16710 return false; 16711 16712 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16713 16714 switch (TypeExpr->getStmtClass()) { 16715 case Stmt::UnaryOperatorClass: { 16716 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16717 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16718 TypeExpr = UO->getSubExpr(); 16719 continue; 16720 } 16721 return false; 16722 } 16723 16724 case Stmt::DeclRefExprClass: { 16725 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16726 *VD = DRE->getDecl(); 16727 return true; 16728 } 16729 16730 case Stmt::IntegerLiteralClass: { 16731 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16732 llvm::APInt MagicValueAPInt = IL->getValue(); 16733 if (MagicValueAPInt.getActiveBits() <= 64) { 16734 *MagicValue = MagicValueAPInt.getZExtValue(); 16735 return true; 16736 } else 16737 return false; 16738 } 16739 16740 case Stmt::BinaryConditionalOperatorClass: 16741 case Stmt::ConditionalOperatorClass: { 16742 const AbstractConditionalOperator *ACO = 16743 cast<AbstractConditionalOperator>(TypeExpr); 16744 bool Result; 16745 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16746 isConstantEvaluated)) { 16747 if (Result) 16748 TypeExpr = ACO->getTrueExpr(); 16749 else 16750 TypeExpr = ACO->getFalseExpr(); 16751 continue; 16752 } 16753 return false; 16754 } 16755 16756 case Stmt::BinaryOperatorClass: { 16757 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16758 if (BO->getOpcode() == BO_Comma) { 16759 TypeExpr = BO->getRHS(); 16760 continue; 16761 } 16762 return false; 16763 } 16764 16765 default: 16766 return false; 16767 } 16768 } 16769 } 16770 16771 /// Retrieve the C type corresponding to type tag TypeExpr. 
///
/// \param TypeExpr Expression that specifies a type tag.
///
/// \param MagicValues Registered magic values.
///
/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
/// kind.
///
/// \param TypeInfo Information about the corresponding C type.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
static bool GetMatchingCType(
    const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
    const ASTContext &Ctx,
    const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
        *MagicValues,
    bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
    bool isConstantEvaluated) {
  FoundWrongKind = false;

  // Variable declaration that has type_tag_for_datatype attribute.
  const ValueDecl *VD = nullptr;

  uint64_t MagicValue;

  if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated))
    return false;

  if (VD) {
    // The tag was a declaration reference: look up its
    // type_tag_for_datatype attribute, if any.
    if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
      if (I->getArgumentKind() != ArgumentKind) {
        FoundWrongKind = true;
        return false;
      }
      TypeInfo.Type = I->getMatchingCType();
      TypeInfo.LayoutCompatible = I->getLayoutCompatible();
      TypeInfo.MustBeNull = I->getMustBeNull();
      return true;
    }
    return false;
  }

  // Otherwise the tag was an integer literal: consult the registered
  // magic values, if there are any.
  if (!MagicValues)
    return false;

  llvm::DenseMap<Sema::TypeTagMagicValue,
                 Sema::TypeTagData>::const_iterator I =
      MagicValues->find(std::make_pair(ArgumentKind, MagicValue));
  if (I == MagicValues->end())
    return false;

  TypeInfo = I->second;
  return true;
}

void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                      uint64_t MagicValue, QualType Type,
                                      bool LayoutCompatible,
                                      bool MustBeNull) {
  // Lazily allocate the magic-value table on first registration.
  if (!TypeTagForDatatypeMagicValues)
    TypeTagForDatatypeMagicValues.reset(
        new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);

  TypeTagMagicValue Magic(ArgumentKind, MagicValue);
  (*TypeTagForDatatypeMagicValues)[Magic] =
      TypeTagData(Type, LayoutCompatible, MustBeNull);
}

// Returns true when T1 and T2 are 'char'-family builtin types that differ
// only in whether the plain 'char' spelling or the explicit signed/unsigned
// spelling of the same signedness was used.
static bool IsSameCharType(QualType T1, QualType T2) {
  const BuiltinType *BT1 = T1->getAs<BuiltinType>();
  if (!BT1)
    return false;

  const BuiltinType *BT2 = T2->getAs<BuiltinType>();
  if (!BT2)
    return false;

  BuiltinType::Kind T1Kind = BT1->getKind();
  BuiltinType::Kind T2Kind = BT2->getKind();

  return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
         (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
         (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
         (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
}

// Check a call against an argument_with_type_tag attribute: the argument at
// ArgumentIdx must have the C type associated (via type_tag_for_datatype or
// a registered magic value) with the type tag at TypeTagIdx.
void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                    const ArrayRef<const Expr *> ExprArgs,
                                    SourceLocation CallSiteLoc) {
  const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
  bool IsPointerAttr = Attr->getIsPointer();

  // Retrieve the argument representing the 'type_tag'.
  unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
  if (TypeTagIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 0 << Attr->getTypeTagIdx().getSourceIndex();
    return;
  }
  const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
  bool FoundWrongKind;
  TypeTagData TypeInfo;
  if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
                        TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
                        TypeInfo, isConstantEvaluated())) {
    if (FoundWrongKind)
      Diag(TypeTagExpr->getExprLoc(),
           diag::warn_type_tag_for_datatype_wrong_kind)
          << TypeTagExpr->getSourceRange();
    return;
  }

  // Retrieve the argument representing the 'arg_idx'.
  unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
  if (ArgumentIdxAST >= ExprArgs.size()) {
    Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
        << 1 << Attr->getArgumentIdx().getSourceIndex();
    return;
  }
  const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
  if (IsPointerAttr) {
    // Skip implicit cast of pointer to `void *' (as a function argument).
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr))
      if (ICE->getType()->isVoidPointerType() &&
          ICE->getCastKind() == CK_BitCast)
        ArgumentExpr = ICE->getSubExpr();
  }
  QualType ArgumentType = ArgumentExpr->getType();

  // Passing a `void*' pointer shouldn't trigger a warning.
  if (IsPointerAttr && ArgumentType->isVoidPointerType())
    return;

  if (TypeInfo.MustBeNull) {
    // Type tag with matching void type requires a null pointer.
    if (!ArgumentExpr->isNullPointerConstant(Context,
                                             Expr::NPC_ValueDependentIsNotNull)) {
      Diag(ArgumentExpr->getExprLoc(),
           diag::warn_type_safety_null_pointer_required)
          << ArgumentKind->getName()
          << ArgumentExpr->getSourceRange()
          << TypeTagExpr->getSourceRange();
    }
    return;
  }

  QualType RequiredType = TypeInfo.Type;
  if (IsPointerAttr)
    RequiredType = Context.getPointerType(RequiredType);

  bool mismatch = false;
  if (!TypeInfo.LayoutCompatible) {
    mismatch = !Context.hasSameType(ArgumentType, RequiredType);

    // C++11 [basic.fundamental] p1:
    // Plain char, signed char, and unsigned char are three distinct types.
    //
    // But we treat plain `char' as equivalent to `signed char' or `unsigned
    // char' depending on the current char signedness mode.
    if (mismatch)
      if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(),
                                           RequiredType->getPointeeType())) ||
          (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType)))
        mismatch = false;
  } else
    if (IsPointerAttr)
      mismatch = !isLayoutCompatible(Context,
                                     ArgumentType->getPointeeType(),
                                     RequiredType->getPointeeType());
    else
      mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType);

  if (mismatch)
    Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
        << ArgumentType << ArgumentKind
        << TypeInfo.LayoutCompatible << RequiredType
        << ArgumentExpr->getSourceRange()
        << TypeTagExpr->getSourceRange();
}

// Record a misaligned-member access for later diagnosis.
void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

// Emit -Waddress-of-packed-member for every recorded access, then clear the
// list.
void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    // For anonymous records, name them via their typedef if one exists.
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

// Drop a recorded misaligned-member access when the address is converted to
// an integer or to a pointer whose pointee alignment requirement is no
// stricter than what the member actually provides.
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

// If E is a member access whose effective alignment may be lower than the
// alignment its type requires (because of packed attributes somewhere in the
// member chain), invoke Action with the culprit field and its alignment.
void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset it but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing the first
    // FieldDecl that either is packed or else its RecordDecl is,
    // seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

// Entry point for -Waddress-of-packed-member on the RHS of an expression:
// record any reduced-alignment member access for later diagnosis.
void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}

// Check if \p Ty is a valid type for the elementwise math builtins. If it is
// not a valid type, emit an error message and return true. Otherwise return
// false.
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    S.Diag(Loc, diag::err_builtin_invalid_arg_type)
        << 1 << /* vector, integer or float ty*/ 0 << Ty;
    return true;
  }
  return false;
}

// Apply the usual unary conversions to the single argument of an elementwise
// math builtin, validate its type, and propagate it as the call's type.
bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}

// Type-check a two-argument elementwise math builtin: both arguments are
// promoted to a common type, which must be valid for elementwise math and
// becomes the call's result type.
bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}

// Apply the usual unary conversions to the single argument of a reduction
// builtin; unlike the elementwise case, the result type is left alone.
bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}

ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty*/ 1 << Matrix->getType();
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
17201 QualType ResultType = Context.getConstantMatrixType( 17202 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 17203 17204 // Change the return type to the type of the returned matrix. 17205 TheCall->setType(ResultType); 17206 17207 // Update call argument to use the possibly converted matrix argument. 17208 TheCall->setArg(0, Matrix); 17209 return CallResult; 17210 } 17211 17212 // Get and verify the matrix dimensions. 17213 static llvm::Optional<unsigned> 17214 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17215 SourceLocation ErrorPos; 17216 Optional<llvm::APSInt> Value = 17217 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17218 if (!Value) { 17219 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17220 << Name; 17221 return {}; 17222 } 17223 uint64_t Dim = Value->getZExtValue(); 17224 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17225 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17226 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17227 return {}; 17228 } 17229 return Dim; 17230 } 17231 17232 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17233 ExprResult CallResult) { 17234 if (!getLangOpts().MatrixTypes) { 17235 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17236 return ExprError(); 17237 } 17238 17239 if (checkArgCount(*this, TheCall, 4)) 17240 return ExprError(); 17241 17242 unsigned PtrArgIdx = 0; 17243 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17244 Expr *RowsExpr = TheCall->getArg(1); 17245 Expr *ColumnsExpr = TheCall->getArg(2); 17246 Expr *StrideExpr = TheCall->getArg(3); 17247 17248 bool ArgError = false; 17249 17250 // Check pointer argument. 
17251 { 17252 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17253 if (PtrConv.isInvalid()) 17254 return PtrConv; 17255 PtrExpr = PtrConv.get(); 17256 TheCall->setArg(0, PtrExpr); 17257 if (PtrExpr->isTypeDependent()) { 17258 TheCall->setType(Context.DependentTy); 17259 return TheCall; 17260 } 17261 } 17262 17263 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17264 QualType ElementTy; 17265 if (!PtrTy) { 17266 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17267 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17268 ArgError = true; 17269 } else { 17270 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 17271 17272 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 17273 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17274 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 17275 << PtrExpr->getType(); 17276 ArgError = true; 17277 } 17278 } 17279 17280 // Apply default Lvalue conversions and convert the expression to size_t. 17281 auto ApplyArgumentConversions = [this](Expr *E) { 17282 ExprResult Conv = DefaultLvalueConversion(E); 17283 if (Conv.isInvalid()) 17284 return Conv; 17285 17286 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 17287 }; 17288 17289 // Apply conversion to row and column expressions. 17290 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 17291 if (!RowsConv.isInvalid()) { 17292 RowsExpr = RowsConv.get(); 17293 TheCall->setArg(1, RowsExpr); 17294 } else 17295 RowsExpr = nullptr; 17296 17297 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 17298 if (!ColumnsConv.isInvalid()) { 17299 ColumnsExpr = ColumnsConv.get(); 17300 TheCall->setArg(2, ColumnsExpr); 17301 } else 17302 ColumnsExpr = nullptr; 17303 17304 // If any any part of the result matrix type is still pending, just use 17305 // Context.DependentTy, until all parts are resolved. 
17306 if ((RowsExpr && RowsExpr->isTypeDependent()) || 17307 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 17308 TheCall->setType(Context.DependentTy); 17309 return CallResult; 17310 } 17311 17312 // Check row and column dimensions. 17313 llvm::Optional<unsigned> MaybeRows; 17314 if (RowsExpr) 17315 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 17316 17317 llvm::Optional<unsigned> MaybeColumns; 17318 if (ColumnsExpr) 17319 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 17320 17321 // Check stride argument. 17322 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 17323 if (StrideConv.isInvalid()) 17324 return ExprError(); 17325 StrideExpr = StrideConv.get(); 17326 TheCall->setArg(3, StrideExpr); 17327 17328 if (MaybeRows) { 17329 if (Optional<llvm::APSInt> Value = 17330 StrideExpr->getIntegerConstantExpr(Context)) { 17331 uint64_t Stride = Value->getZExtValue(); 17332 if (Stride < *MaybeRows) { 17333 Diag(StrideExpr->getBeginLoc(), 17334 diag::err_builtin_matrix_stride_too_small); 17335 ArgError = true; 17336 } 17337 } 17338 } 17339 17340 if (ArgError || !MaybeRows || !MaybeColumns) 17341 return ExprError(); 17342 17343 TheCall->setType( 17344 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 17345 return CallResult; 17346 } 17347 17348 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 17349 ExprResult CallResult) { 17350 if (checkArgCount(*this, TheCall, 3)) 17351 return ExprError(); 17352 17353 unsigned PtrArgIdx = 1; 17354 Expr *MatrixExpr = TheCall->getArg(0); 17355 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17356 Expr *StrideExpr = TheCall->getArg(2); 17357 17358 bool ArgError = false; 17359 17360 { 17361 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 17362 if (MatrixConv.isInvalid()) 17363 return MatrixConv; 17364 MatrixExpr = MatrixConv.get(); 17365 TheCall->setArg(0, MatrixExpr); 17366 } 17367 if (MatrixExpr->isTypeDependent()) { 17368 
TheCall->setType(Context.DependentTy); 17369 return TheCall; 17370 } 17371 17372 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 17373 if (!MatrixTy) { 17374 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17375 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 17376 ArgError = true; 17377 } 17378 17379 { 17380 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17381 if (PtrConv.isInvalid()) 17382 return PtrConv; 17383 PtrExpr = PtrConv.get(); 17384 TheCall->setArg(1, PtrExpr); 17385 if (PtrExpr->isTypeDependent()) { 17386 TheCall->setType(Context.DependentTy); 17387 return TheCall; 17388 } 17389 } 17390 17391 // Check pointer argument. 17392 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17393 if (!PtrTy) { 17394 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17395 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17396 ArgError = true; 17397 } else { 17398 QualType ElementTy = PtrTy->getPointeeType(); 17399 if (ElementTy.isConstQualified()) { 17400 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 17401 ArgError = true; 17402 } 17403 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 17404 if (MatrixTy && 17405 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 17406 Diag(PtrExpr->getBeginLoc(), 17407 diag::err_builtin_matrix_pointer_arg_mismatch) 17408 << ElementTy << MatrixTy->getElementType(); 17409 ArgError = true; 17410 } 17411 } 17412 17413 // Apply default Lvalue conversions and convert the stride expression to 17414 // size_t. 
17415 { 17416 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 17417 if (StrideConv.isInvalid()) 17418 return StrideConv; 17419 17420 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 17421 if (StrideConv.isInvalid()) 17422 return StrideConv; 17423 StrideExpr = StrideConv.get(); 17424 TheCall->setArg(2, StrideExpr); 17425 } 17426 17427 // Check stride argument. 17428 if (MatrixTy) { 17429 if (Optional<llvm::APSInt> Value = 17430 StrideExpr->getIntegerConstantExpr(Context)) { 17431 uint64_t Stride = Value->getZExtValue(); 17432 if (Stride < MatrixTy->getNumRows()) { 17433 Diag(StrideExpr->getBeginLoc(), 17434 diag::err_builtin_matrix_stride_too_small); 17435 ArgError = true; 17436 } 17437 } 17438 } 17439 17440 if (ArgError) 17441 return ExprError(); 17442 17443 return CallResult; 17444 } 17445 17446 /// \brief Enforce the bounds of a TCB 17447 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 17448 /// directly calls other functions in the same TCB as marked by the enforce_tcb 17449 /// and enforce_tcb_leaf attributes. 17450 void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 17451 const NamedDecl *Callee) { 17452 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 17453 17454 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 17455 return; 17456 17457 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 17458 // all TCBs the callee is a part of. 17459 llvm::StringSet<> CalleeTCBs; 17460 for_each(Callee->specific_attrs<EnforceTCBAttr>(), 17461 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 17462 for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(), 17463 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 17464 17465 // Go through the TCBs the caller is a part of and emit warnings if Caller 17466 // is in a TCB that the Callee is not. 
17467 for_each( 17468 Caller->specific_attrs<EnforceTCBAttr>(), 17469 [&](const auto *A) { 17470 StringRef CallerTCB = A->getTCBName(); 17471 if (CalleeTCBs.count(CallerTCB) == 0) { 17472 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 17473 << Callee << CallerTCB; 17474 } 17475 }); 17476 } 17477