//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
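  // For example, __builtin_annotation(i, "tag") passes the check below, while
  // a wide string literal such as L"tag" as the second argument is diagnosed.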
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
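  // Illustration: for a 32-bit source type, MaxAlignmentBits is 31, so the
  // largest alignment accepted below is 1u << 31; values smaller than 1,
  // larger than that, or not a power of two are rejected.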
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to mul function until
  // we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
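    // For instance, "%08d" contributes max(FieldWidth, Precision) ==
    // max(8, 1) == 8 bytes to the running estimate below.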
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
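        // e.g. "%#.0f" gains one extra byte here for the forced '.'.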
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(Index);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    return Result.Val.getInt();
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
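    // e.g. a parameter declared as
    //   char *buf __attribute__((pass_object_size(1)))
    // makes BOSType 1 here; without the attribute we conservatively use 0.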
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    const Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
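    // e.g. "char buf[4]; strncpy(buf, src, 10);" passes an explicit size (10)
    // larger than the destination object size (4) and is diagnosed below.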
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      SourceSize.getValue().ule(DestinationSize.getValue()))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
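  // For instance, a block such as ^(local void *a, local void *b) { ... } is
  // accepted, while ^(global int *p) { ... } triggers the diagnostic below.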
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
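/// For example, assuming a block with a single 'local void*' parameter, a call
/// like enqueue_kernel(q, flags, ndrange, ^(local void *p) { ... }, 64u) must
/// pass exactly one trailing size argument (here 64) for that parameter.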
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns the OpenCL access qualifier of the argument's declaration, if any.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Checks that the first argument is a pipe and that its access qualifier is
/// compatible with the builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
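  // e.g. calling read_pipe() on a pipe variable declared write_only is
  // rejected below.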
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
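  // ICEArguments is a bitmask: if bit N is set, argument N of this builtin
  // must be an integer constant expression, which the loop below enforces.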
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0; // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_alloca:
    Diag(TheCall->getBeginLoc(), diag::warn_alloca)
        << TheCall->getDirectCallee();
    break;
  case Builtin::BI__arithmetic_fence:
    if (SemaBuiltinArithmeticFence(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_complex:
    if (SemaBuiltinComplex(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
Builtin::BI__sync_fetch_and_add_1: 1617 case Builtin::BI__sync_fetch_and_add_2: 1618 case Builtin::BI__sync_fetch_and_add_4: 1619 case Builtin::BI__sync_fetch_and_add_8: 1620 case Builtin::BI__sync_fetch_and_add_16: 1621 case Builtin::BI__sync_fetch_and_sub: 1622 case Builtin::BI__sync_fetch_and_sub_1: 1623 case Builtin::BI__sync_fetch_and_sub_2: 1624 case Builtin::BI__sync_fetch_and_sub_4: 1625 case Builtin::BI__sync_fetch_and_sub_8: 1626 case Builtin::BI__sync_fetch_and_sub_16: 1627 case Builtin::BI__sync_fetch_and_or: 1628 case Builtin::BI__sync_fetch_and_or_1: 1629 case Builtin::BI__sync_fetch_and_or_2: 1630 case Builtin::BI__sync_fetch_and_or_4: 1631 case Builtin::BI__sync_fetch_and_or_8: 1632 case Builtin::BI__sync_fetch_and_or_16: 1633 case Builtin::BI__sync_fetch_and_and: 1634 case Builtin::BI__sync_fetch_and_and_1: 1635 case Builtin::BI__sync_fetch_and_and_2: 1636 case Builtin::BI__sync_fetch_and_and_4: 1637 case Builtin::BI__sync_fetch_and_and_8: 1638 case Builtin::BI__sync_fetch_and_and_16: 1639 case Builtin::BI__sync_fetch_and_xor: 1640 case Builtin::BI__sync_fetch_and_xor_1: 1641 case Builtin::BI__sync_fetch_and_xor_2: 1642 case Builtin::BI__sync_fetch_and_xor_4: 1643 case Builtin::BI__sync_fetch_and_xor_8: 1644 case Builtin::BI__sync_fetch_and_xor_16: 1645 case Builtin::BI__sync_fetch_and_nand: 1646 case Builtin::BI__sync_fetch_and_nand_1: 1647 case Builtin::BI__sync_fetch_and_nand_2: 1648 case Builtin::BI__sync_fetch_and_nand_4: 1649 case Builtin::BI__sync_fetch_and_nand_8: 1650 case Builtin::BI__sync_fetch_and_nand_16: 1651 case Builtin::BI__sync_add_and_fetch: 1652 case Builtin::BI__sync_add_and_fetch_1: 1653 case Builtin::BI__sync_add_and_fetch_2: 1654 case Builtin::BI__sync_add_and_fetch_4: 1655 case Builtin::BI__sync_add_and_fetch_8: 1656 case Builtin::BI__sync_add_and_fetch_16: 1657 case Builtin::BI__sync_sub_and_fetch: 1658 case Builtin::BI__sync_sub_and_fetch_1: 1659 case Builtin::BI__sync_sub_and_fetch_2: 1660 case Builtin::BI__sync_sub_and_fetch_4: 1661 case Builtin::BI__sync_sub_and_fetch_8: 1662 case Builtin::BI__sync_sub_and_fetch_16: 1663 case Builtin::BI__sync_and_and_fetch: 1664 case Builtin::BI__sync_and_and_fetch_1: 1665 case Builtin::BI__sync_and_and_fetch_2: 1666 case Builtin::BI__sync_and_and_fetch_4: 1667 case Builtin::BI__sync_and_and_fetch_8: 1668 case Builtin::BI__sync_and_and_fetch_16: 1669 case Builtin::BI__sync_or_and_fetch: 1670 case Builtin::BI__sync_or_and_fetch_1: 1671 case Builtin::BI__sync_or_and_fetch_2: 1672 case Builtin::BI__sync_or_and_fetch_4: 1673 case Builtin::BI__sync_or_and_fetch_8: 1674 case Builtin::BI__sync_or_and_fetch_16: 1675 case Builtin::BI__sync_xor_and_fetch: 1676 case Builtin::BI__sync_xor_and_fetch_1: 1677 case Builtin::BI__sync_xor_and_fetch_2: 1678 case Builtin::BI__sync_xor_and_fetch_4: 1679 case Builtin::BI__sync_xor_and_fetch_8: 1680 case Builtin::BI__sync_xor_and_fetch_16: 1681 case Builtin::BI__sync_nand_and_fetch: 1682 case Builtin::BI__sync_nand_and_fetch_1: 1683 case Builtin::BI__sync_nand_and_fetch_2: 1684 case Builtin::BI__sync_nand_and_fetch_4: 1685 case Builtin::BI__sync_nand_and_fetch_8: 1686 case Builtin::BI__sync_nand_and_fetch_16: 1687 case Builtin::BI__sync_val_compare_and_swap: 1688 case Builtin::BI__sync_val_compare_and_swap_1: 1689 case Builtin::BI__sync_val_compare_and_swap_2: 1690 case Builtin::BI__sync_val_compare_and_swap_4: 1691 case Builtin::BI__sync_val_compare_and_swap_8: 1692 case Builtin::BI__sync_val_compare_and_swap_16: 1693 case Builtin::BI__sync_bool_compare_and_swap: 1694 case 
Builtin::BI__sync_bool_compare_and_swap_1: 1695 case Builtin::BI__sync_bool_compare_and_swap_2: 1696 case Builtin::BI__sync_bool_compare_and_swap_4: 1697 case Builtin::BI__sync_bool_compare_and_swap_8: 1698 case Builtin::BI__sync_bool_compare_and_swap_16: 1699 case Builtin::BI__sync_lock_test_and_set: 1700 case Builtin::BI__sync_lock_test_and_set_1: 1701 case Builtin::BI__sync_lock_test_and_set_2: 1702 case Builtin::BI__sync_lock_test_and_set_4: 1703 case Builtin::BI__sync_lock_test_and_set_8: 1704 case Builtin::BI__sync_lock_test_and_set_16: 1705 case Builtin::BI__sync_lock_release: 1706 case Builtin::BI__sync_lock_release_1: 1707 case Builtin::BI__sync_lock_release_2: 1708 case Builtin::BI__sync_lock_release_4: 1709 case Builtin::BI__sync_lock_release_8: 1710 case Builtin::BI__sync_lock_release_16: 1711 case Builtin::BI__sync_swap: 1712 case Builtin::BI__sync_swap_1: 1713 case Builtin::BI__sync_swap_2: 1714 case Builtin::BI__sync_swap_4: 1715 case Builtin::BI__sync_swap_8: 1716 case Builtin::BI__sync_swap_16: 1717 return SemaBuiltinAtomicOverloaded(TheCallResult); 1718 case Builtin::BI__sync_synchronize: 1719 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1720 << TheCall->getCallee()->getSourceRange(); 1721 break; 1722 case Builtin::BI__builtin_nontemporal_load: 1723 case Builtin::BI__builtin_nontemporal_store: 1724 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1725 case Builtin::BI__builtin_memcpy_inline: { 1726 clang::Expr *SizeOp = TheCall->getArg(2); 1727 // We warn about copying to or from `nullptr` pointers when `size` is 1728 // greater than 0. When `size` is value dependent we cannot evaluate its 1729 // value so we bail out. 1730 if (SizeOp->isValueDependent()) 1731 break; 1732 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1733 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1734 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1735 } 1736 break; 1737 } 1738 #define BUILTIN(ID, TYPE, ATTRS) 1739 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1740 case Builtin::BI##ID: \ 1741 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1742 #include "clang/Basic/Builtins.def" 1743 case Builtin::BI__annotation: 1744 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1745 return ExprError(); 1746 break; 1747 case Builtin::BI__builtin_annotation: 1748 if (SemaBuiltinAnnotation(*this, TheCall)) 1749 return ExprError(); 1750 break; 1751 case Builtin::BI__builtin_addressof: 1752 if (SemaBuiltinAddressof(*this, TheCall)) 1753 return ExprError(); 1754 break; 1755 case Builtin::BI__builtin_is_aligned: 1756 case Builtin::BI__builtin_align_up: 1757 case Builtin::BI__builtin_align_down: 1758 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1759 return ExprError(); 1760 break; 1761 case Builtin::BI__builtin_add_overflow: 1762 case Builtin::BI__builtin_sub_overflow: 1763 case Builtin::BI__builtin_mul_overflow: 1764 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1765 return ExprError(); 1766 break; 1767 case Builtin::BI__builtin_operator_new: 1768 case Builtin::BI__builtin_operator_delete: { 1769 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1770 ExprResult Res = 1771 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1772 if (Res.isInvalid()) 1773 CorrectDelayedTyposInExpr(TheCallResult.get()); 1774 return Res; 1775 } 1776 case Builtin::BI__builtin_dump_struct: { 1777 // We first want to ensure we are called with 2 arguments 1778 if (checkArgCount(*this, TheCall, 
2)) 1779 return ExprError(); 1780 // Ensure that the first argument is of type 'struct XX *' 1781 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1782 const QualType PtrArgType = PtrArg->getType(); 1783 if (!PtrArgType->isPointerType() || 1784 !PtrArgType->getPointeeType()->isRecordType()) { 1785 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1786 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1787 << "structure pointer"; 1788 return ExprError(); 1789 } 1790 1791 // Ensure that the second argument is of type 'FunctionType' 1792 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1793 const QualType FnPtrArgType = FnPtrArg->getType(); 1794 if (!FnPtrArgType->isPointerType()) { 1795 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1796 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1797 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1798 return ExprError(); 1799 } 1800 1801 const auto *FuncType = 1802 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1803 1804 if (!FuncType) { 1805 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1806 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1807 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1808 return ExprError(); 1809 } 1810 1811 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1812 if (!FT->getNumParams()) { 1813 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1814 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1815 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1816 return ExprError(); 1817 } 1818 QualType PT = FT->getParamType(0); 1819 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1820 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1821 !PT->getPointeeType().isConstQualified()) { 1822 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1823 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1824 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1825 return ExprError(); 1826 } 1827 } 1828 1829 TheCall->setType(Context.IntTy); 1830 break; 1831 } 1832 case Builtin::BI__builtin_expect_with_probability: { 1833 // We first want to ensure we are called with 3 arguments 1834 if (checkArgCount(*this, TheCall, 3)) 1835 return ExprError(); 1836 // then check probability is constant float in range [0.0, 1.0] 1837 const Expr *ProbArg = TheCall->getArg(2); 1838 SmallVector<PartialDiagnosticAt, 8> Notes; 1839 Expr::EvalResult Eval; 1840 Eval.Diag = &Notes; 1841 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 1842 !Eval.Val.isFloat()) { 1843 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1844 << ProbArg->getSourceRange(); 1845 for (const PartialDiagnosticAt &PDiag : Notes) 1846 Diag(PDiag.first, PDiag.second); 1847 return ExprError(); 1848 } 1849 llvm::APFloat Probability = Eval.Val.getFloat(); 1850 bool LoseInfo = false; 1851 Probability.convert(llvm::APFloat::IEEEdouble(), 1852 llvm::RoundingMode::Dynamic, &LoseInfo); 1853 if (!(Probability >= llvm::APFloat(0.0) && 1854 Probability <= llvm::APFloat(1.0))) { 1855 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1856 << ProbArg->getSourceRange(); 1857 return ExprError(); 1858 } 1859 break; 1860 } 1861 case Builtin::BI__builtin_preserve_access_index: 1862 if (SemaBuiltinPreserveAI(*this, TheCall)) 1863 return ExprError(); 1864 break; 1865 case 
Builtin::BI__builtin_call_with_static_chain: 1866 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1867 return ExprError(); 1868 break; 1869 case Builtin::BI__exception_code: 1870 case Builtin::BI_exception_code: 1871 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1872 diag::err_seh___except_block)) 1873 return ExprError(); 1874 break; 1875 case Builtin::BI__exception_info: 1876 case Builtin::BI_exception_info: 1877 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1878 diag::err_seh___except_filter)) 1879 return ExprError(); 1880 break; 1881 case Builtin::BI__GetExceptionInfo: 1882 if (checkArgCount(*this, TheCall, 1)) 1883 return ExprError(); 1884 1885 if (CheckCXXThrowOperand( 1886 TheCall->getBeginLoc(), 1887 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1888 TheCall)) 1889 return ExprError(); 1890 1891 TheCall->setType(Context.VoidPtrTy); 1892 break; 1893 // OpenCL v2.0, s6.13.16 - Pipe functions 1894 case Builtin::BIread_pipe: 1895 case Builtin::BIwrite_pipe: 1896 // Since those two functions are declared with var args, we need a semantic 1897 // check for the argument. 1898 if (SemaBuiltinRWPipe(*this, TheCall)) 1899 return ExprError(); 1900 break; 1901 case Builtin::BIreserve_read_pipe: 1902 case Builtin::BIreserve_write_pipe: 1903 case Builtin::BIwork_group_reserve_read_pipe: 1904 case Builtin::BIwork_group_reserve_write_pipe: 1905 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1906 return ExprError(); 1907 break; 1908 case Builtin::BIsub_group_reserve_read_pipe: 1909 case Builtin::BIsub_group_reserve_write_pipe: 1910 if (checkOpenCLSubgroupExt(*this, TheCall) || 1911 SemaBuiltinReserveRWPipe(*this, TheCall)) 1912 return ExprError(); 1913 break; 1914 case Builtin::BIcommit_read_pipe: 1915 case Builtin::BIcommit_write_pipe: 1916 case Builtin::BIwork_group_commit_read_pipe: 1917 case Builtin::BIwork_group_commit_write_pipe: 1918 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1919 return ExprError(); 1920 break; 1921 case Builtin::BIsub_group_commit_read_pipe: 1922 case Builtin::BIsub_group_commit_write_pipe: 1923 if (checkOpenCLSubgroupExt(*this, TheCall) || 1924 SemaBuiltinCommitRWPipe(*this, TheCall)) 1925 return ExprError(); 1926 break; 1927 case Builtin::BIget_pipe_num_packets: 1928 case Builtin::BIget_pipe_max_packets: 1929 if (SemaBuiltinPipePackets(*this, TheCall)) 1930 return ExprError(); 1931 break; 1932 case Builtin::BIto_global: 1933 case Builtin::BIto_local: 1934 case Builtin::BIto_private: 1935 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1936 return ExprError(); 1937 break; 1938 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
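// Illustrative sketch (OpenCL C 2.0 usage, added for exposition): the
// enqueue-kernel cases that follow hand calls of the form
//   enqueue_kernel(queue, CLK_ENQUEUE_FLAGS_NO_WAIT, ndrange, ^{ ... });
// to SemaOpenCLBuiltinEnqueueKernel(), since the queue/flags/ndrange/block
// operands need bespoke semantic checking rather than ordinary prototype
// checking.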
1939 case Builtin::BIenqueue_kernel: 1940 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1941 return ExprError(); 1942 break; 1943 case Builtin::BIget_kernel_work_group_size: 1944 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1945 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1946 return ExprError(); 1947 break; 1948 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1949 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1950 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1951 return ExprError(); 1952 break; 1953 case Builtin::BI__builtin_os_log_format: 1954 Cleanup.setExprNeedsCleanups(true); 1955 LLVM_FALLTHROUGH; 1956 case Builtin::BI__builtin_os_log_format_buffer_size: 1957 if (SemaBuiltinOSLogFormat(TheCall)) 1958 return ExprError(); 1959 break; 1960 case Builtin::BI__builtin_frame_address: 1961 case Builtin::BI__builtin_return_address: { 1962 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1963 return ExprError(); 1964 1965 // -Wframe-address warning if non-zero passed to builtin 1966 // return/frame address. 1967 Expr::EvalResult Result; 1968 if (!TheCall->getArg(0)->isValueDependent() && 1969 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1970 Result.Val.getInt() != 0) 1971 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1972 << ((BuiltinID == Builtin::BI__builtin_return_address) 1973 ? "__builtin_return_address" 1974 : "__builtin_frame_address") 1975 << TheCall->getSourceRange(); 1976 break; 1977 } 1978 1979 case Builtin::BI__builtin_matrix_transpose: 1980 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1981 1982 case Builtin::BI__builtin_matrix_column_major_load: 1983 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1984 1985 case Builtin::BI__builtin_matrix_column_major_store: 1986 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1987 1988 case Builtin::BI__builtin_get_device_side_mangled_name: { 1989 auto Check = [](CallExpr *TheCall) { 1990 if (TheCall->getNumArgs() != 1) 1991 return false; 1992 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 1993 if (!DRE) 1994 return false; 1995 auto *D = DRE->getDecl(); 1996 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 1997 return false; 1998 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 1999 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2000 }; 2001 if (!Check(TheCall)) { 2002 Diag(TheCall->getBeginLoc(), 2003 diag::err_hip_invalid_args_builtin_mangled_name); 2004 return ExprError(); 2005 } 2006 } 2007 } 2008 2009 // Since the target specific builtins for each arch overlap, only check those 2010 // of the arch we are compiling for. 2011 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2012 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2013 assert(Context.getAuxTargetInfo() && 2014 "Aux Target Builtin, but not an aux target?"); 2015 2016 if (CheckTSBuiltinFunctionCall( 2017 *Context.getAuxTargetInfo(), 2018 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2019 return ExprError(); 2020 } else { 2021 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2022 TheCall)) 2023 return ExprError(); 2024 } 2025 } 2026 2027 return TheCallResult; 2028 } 2029 2030 // Get the valid immediate range for the specified NEON type code. 2031 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2032 NeonTypeFlags Type(t); 2033 int IsQuad = ForceQuad ? 
true : Type.isQuad(); 2034 switch (Type.getEltType()) { 2035 case NeonTypeFlags::Int8: 2036 case NeonTypeFlags::Poly8: 2037 return shift ? 7 : (8 << IsQuad) - 1; 2038 case NeonTypeFlags::Int16: 2039 case NeonTypeFlags::Poly16: 2040 return shift ? 15 : (4 << IsQuad) - 1; 2041 case NeonTypeFlags::Int32: 2042 return shift ? 31 : (2 << IsQuad) - 1; 2043 case NeonTypeFlags::Int64: 2044 case NeonTypeFlags::Poly64: 2045 return shift ? 63 : (1 << IsQuad) - 1; 2046 case NeonTypeFlags::Poly128: 2047 return shift ? 127 : (1 << IsQuad) - 1; 2048 case NeonTypeFlags::Float16: 2049 assert(!shift && "cannot shift float types!"); 2050 return (4 << IsQuad) - 1; 2051 case NeonTypeFlags::Float32: 2052 assert(!shift && "cannot shift float types!"); 2053 return (2 << IsQuad) - 1; 2054 case NeonTypeFlags::Float64: 2055 assert(!shift && "cannot shift float types!"); 2056 return (1 << IsQuad) - 1; 2057 case NeonTypeFlags::BFloat16: 2058 assert(!shift && "cannot shift float types!"); 2059 return (4 << IsQuad) - 1; 2060 } 2061 llvm_unreachable("Invalid NeonTypeFlag!"); 2062 } 2063 2064 /// getNeonEltType - Return the QualType corresponding to the elements of 2065 /// the vector type specified by the NeonTypeFlags. This is used to check 2066 /// the pointer arguments for Neon load/store intrinsics. 2067 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2068 bool IsPolyUnsigned, bool IsInt64Long) { 2069 switch (Flags.getEltType()) { 2070 case NeonTypeFlags::Int8: 2071 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2072 case NeonTypeFlags::Int16: 2073 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2074 case NeonTypeFlags::Int32: 2075 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2076 case NeonTypeFlags::Int64: 2077 if (IsInt64Long) 2078 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2079 else 2080 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2081 : Context.LongLongTy; 2082 case NeonTypeFlags::Poly8: 2083 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2084 case NeonTypeFlags::Poly16: 2085 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2086 case NeonTypeFlags::Poly64: 2087 if (IsInt64Long) 2088 return Context.UnsignedLongTy; 2089 else 2090 return Context.UnsignedLongLongTy; 2091 case NeonTypeFlags::Poly128: 2092 break; 2093 case NeonTypeFlags::Float16: 2094 return Context.HalfTy; 2095 case NeonTypeFlags::Float32: 2096 return Context.FloatTy; 2097 case NeonTypeFlags::Float64: 2098 return Context.DoubleTy; 2099 case NeonTypeFlags::BFloat16: 2100 return Context.BFloat16Ty; 2101 } 2102 llvm_unreachable("Invalid NeonTypeFlag!"); 2103 } 2104 2105 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2106 // Range check SVE intrinsics that take immediate values. 2107 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2108 2109 switch (BuiltinID) { 2110 default: 2111 return false; 2112 #define GET_SVE_IMMEDIATE_CHECK 2113 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2114 #undef GET_SVE_IMMEDIATE_CHECK 2115 } 2116 2117 // Perform all the immediate checks for this builtin call. 2118 bool HasError = false; 2119 for (auto &I : ImmChecks) { 2120 int ArgNum, CheckTy, ElementSizeInBits; 2121 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2122 2123 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2124 2125 // Function that checks whether the operand (ArgNum) is an immediate 2126 // that is one of the predefined values. 
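// Example (illustrative, assuming the ACLE SVE complex-arithmetic
// intrinsics): the lambda defined below handles immediates that must come
// from a fixed set rather than a contiguous range, e.g. the rotation
// argument of svcadd must be 90 or 270 and the rotation argument of svcmla
// must be 0, 90, 180, or 270; any other constant triggers the corresponding
// err_rotation_argument_to_* diagnostic.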
2127 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2128 int ErrDiag) -> bool { 2129 // We can't check the value of a dependent argument. 2130 Expr *Arg = TheCall->getArg(ArgNum); 2131 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2132 return false; 2133 2134 // Check constant-ness first. 2135 llvm::APSInt Imm; 2136 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2137 return true; 2138 2139 if (!CheckImm(Imm.getSExtValue())) 2140 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2141 return false; 2142 }; 2143 2144 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2145 case SVETypeFlags::ImmCheck0_31: 2146 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2147 HasError = true; 2148 break; 2149 case SVETypeFlags::ImmCheck0_13: 2150 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2151 HasError = true; 2152 break; 2153 case SVETypeFlags::ImmCheck1_16: 2154 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2155 HasError = true; 2156 break; 2157 case SVETypeFlags::ImmCheck0_7: 2158 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2159 HasError = true; 2160 break; 2161 case SVETypeFlags::ImmCheckExtract: 2162 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2163 (2048 / ElementSizeInBits) - 1)) 2164 HasError = true; 2165 break; 2166 case SVETypeFlags::ImmCheckShiftRight: 2167 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2168 HasError = true; 2169 break; 2170 case SVETypeFlags::ImmCheckShiftRightNarrow: 2171 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2172 ElementSizeInBits / 2)) 2173 HasError = true; 2174 break; 2175 case SVETypeFlags::ImmCheckShiftLeft: 2176 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2177 ElementSizeInBits - 1)) 2178 HasError = true; 2179 break; 2180 case SVETypeFlags::ImmCheckLaneIndex: 2181 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2182 (128 / (1 * ElementSizeInBits)) - 1)) 2183 HasError = true; 2184 break; 2185 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2186 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2187 (128 / (2 * ElementSizeInBits)) - 1)) 2188 HasError = true; 2189 break; 2190 case SVETypeFlags::ImmCheckLaneIndexDot: 2191 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2192 (128 / (4 * ElementSizeInBits)) - 1)) 2193 HasError = true; 2194 break; 2195 case SVETypeFlags::ImmCheckComplexRot90_270: 2196 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2197 diag::err_rotation_argument_to_cadd)) 2198 HasError = true; 2199 break; 2200 case SVETypeFlags::ImmCheckComplexRotAll90: 2201 if (CheckImmediateInSet( 2202 [](int64_t V) { 2203 return V == 0 || V == 90 || V == 180 || V == 270; 2204 }, 2205 diag::err_rotation_argument_to_cmla)) 2206 HasError = true; 2207 break; 2208 case SVETypeFlags::ImmCheck0_1: 2209 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2210 HasError = true; 2211 break; 2212 case SVETypeFlags::ImmCheck0_2: 2213 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2214 HasError = true; 2215 break; 2216 case SVETypeFlags::ImmCheck0_3: 2217 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2218 HasError = true; 2219 break; 2220 } 2221 } 2222 2223 return HasError; 2224 } 2225 2226 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2227 unsigned BuiltinID, CallExpr *TheCall) { 2228 llvm::APSInt Result; 2229 uint64_t mask = 0; 2230 unsigned TV = 0; 2231 int PtrArgNum = -1; 2232 bool HasConstPtr = false; 2233 switch (BuiltinID) { 2234 #define GET_NEON_OVERLOAD_CHECK 2235 
#include "clang/Basic/arm_neon.inc" 2236 #include "clang/Basic/arm_fp16.inc" 2237 #undef GET_NEON_OVERLOAD_CHECK 2238 } 2239 2240 // For NEON intrinsics which are overloaded on vector element type, validate 2241 // the immediate which specifies which variant to emit. 2242 unsigned ImmArg = TheCall->getNumArgs()-1; 2243 if (mask) { 2244 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2245 return true; 2246 2247 TV = Result.getLimitedValue(64); 2248 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2249 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2250 << TheCall->getArg(ImmArg)->getSourceRange(); 2251 } 2252 2253 if (PtrArgNum >= 0) { 2254 // Check that pointer arguments have the specified type. 2255 Expr *Arg = TheCall->getArg(PtrArgNum); 2256 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2257 Arg = ICE->getSubExpr(); 2258 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2259 QualType RHSTy = RHS.get()->getType(); 2260 2261 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2262 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2263 Arch == llvm::Triple::aarch64_32 || 2264 Arch == llvm::Triple::aarch64_be; 2265 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2266 QualType EltTy = 2267 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2268 if (HasConstPtr) 2269 EltTy = EltTy.withConst(); 2270 QualType LHSTy = Context.getPointerType(EltTy); 2271 AssignConvertType ConvTy; 2272 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2273 if (RHS.isInvalid()) 2274 return true; 2275 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2276 RHS.get(), AA_Assigning)) 2277 return true; 2278 } 2279 2280 // For NEON intrinsics which take an immediate value as part of the 2281 // instruction, range check them here. 2282 unsigned i = 0, l = 0, u = 0; 2283 switch (BuiltinID) { 2284 default: 2285 return false; 2286 #define GET_NEON_IMMEDIATE_CHECK 2287 #include "clang/Basic/arm_neon.inc" 2288 #include "clang/Basic/arm_fp16.inc" 2289 #undef GET_NEON_IMMEDIATE_CHECK 2290 } 2291 2292 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2293 } 2294 2295 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2296 switch (BuiltinID) { 2297 default: 2298 return false; 2299 #include "clang/Basic/arm_mve_builtin_sema.inc" 2300 } 2301 } 2302 2303 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2304 CallExpr *TheCall) { 2305 bool Err = false; 2306 switch (BuiltinID) { 2307 default: 2308 return false; 2309 #include "clang/Basic/arm_cde_builtin_sema.inc" 2310 } 2311 2312 if (Err) 2313 return true; 2314 2315 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2316 } 2317 2318 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2319 const Expr *CoprocArg, bool WantCDE) { 2320 if (isConstantEvaluated()) 2321 return false; 2322 2323 // We can't check the value of a dependent argument. 
2324 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2325 return false; 2326 2327 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2328 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2329 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2330 2331 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2332 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2333 2334 if (IsCDECoproc != WantCDE) 2335 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2336 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2337 2338 return false; 2339 } 2340 2341 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2342 unsigned MaxWidth) { 2343 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2344 BuiltinID == ARM::BI__builtin_arm_ldaex || 2345 BuiltinID == ARM::BI__builtin_arm_strex || 2346 BuiltinID == ARM::BI__builtin_arm_stlex || 2347 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2348 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2349 BuiltinID == AArch64::BI__builtin_arm_strex || 2350 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2351 "unexpected ARM builtin"); 2352 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2353 BuiltinID == ARM::BI__builtin_arm_ldaex || 2354 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2355 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2356 2357 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2358 2359 // Ensure that we have the proper number of arguments. 2360 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2361 return true; 2362 2363 // Inspect the pointer argument of the atomic builtin. This should always be 2364 // a pointer type, whose element is an integral scalar or pointer type. 2365 // Because it is a pointer type, we don't have to worry about any implicit 2366 // casts here. 2367 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 2368 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2369 if (PointerArgRes.isInvalid()) 2370 return true; 2371 PointerArg = PointerArgRes.get(); 2372 2373 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2374 if (!pointerType) { 2375 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2376 << PointerArg->getType() << PointerArg->getSourceRange(); 2377 return true; 2378 } 2379 2380 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2381 // task is to insert the appropriate casts into the AST. First work out just 2382 // what the appropriate type is. 2383 QualType ValType = pointerType->getPointeeType(); 2384 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2385 if (IsLdrex) 2386 AddrType.addConst(); 2387 2388 // Issue a warning if the cast is dodgy. 2389 CastKind CastNeeded = CK_NoOp; 2390 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2391 CastNeeded = CK_BitCast; 2392 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2393 << PointerArg->getType() << Context.getPointerType(AddrType) 2394 << AA_Passing << PointerArg->getSourceRange(); 2395 } 2396 2397 // Finally, do the cast and replace the argument with the corrected version. 2398 AddrType = Context.getPointerType(AddrType); 2399 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2400 if (PointerArgRes.isInvalid()) 2401 return true; 2402 PointerArg = PointerArgRes.get(); 2403 2404 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 2405 2406 // In general, we allow ints, floats and pointers to be loaded and stored. 2407 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2408 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2409 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2410 << PointerArg->getType() << PointerArg->getSourceRange(); 2411 return true; 2412 } 2413 2414 // But ARM doesn't have instructions to deal with 128-bit versions. 2415 if (Context.getTypeSize(ValType) > MaxWidth) { 2416 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2417 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2418 << PointerArg->getType() << PointerArg->getSourceRange(); 2419 return true; 2420 } 2421 2422 switch (ValType.getObjCLifetime()) { 2423 case Qualifiers::OCL_None: 2424 case Qualifiers::OCL_ExplicitNone: 2425 // okay 2426 break; 2427 2428 case Qualifiers::OCL_Weak: 2429 case Qualifiers::OCL_Strong: 2430 case Qualifiers::OCL_Autoreleasing: 2431 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2432 << ValType << PointerArg->getSourceRange(); 2433 return true; 2434 } 2435 2436 if (IsLdrex) { 2437 TheCall->setType(ValType); 2438 return false; 2439 } 2440 2441 // Initialize the argument to be stored. 2442 ExprResult ValArg = TheCall->getArg(0); 2443 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2444 Context, ValType, /*consume*/ false); 2445 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2446 if (ValArg.isInvalid()) 2447 return true; 2448 TheCall->setArg(0, ValArg.get()); 2449 2450 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2451 // but the custom checker bypasses all default analysis. 2452 TheCall->setType(Context.IntTy); 2453 return false; 2454 } 2455 2456 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2457 CallExpr *TheCall) { 2458 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2459 BuiltinID == ARM::BI__builtin_arm_ldaex || 2460 BuiltinID == ARM::BI__builtin_arm_strex || 2461 BuiltinID == ARM::BI__builtin_arm_stlex) { 2462 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2463 } 2464 2465 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2466 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2467 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2468 } 2469 2470 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2471 BuiltinID == ARM::BI__builtin_arm_wsr64) 2472 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2473 2474 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2475 BuiltinID == ARM::BI__builtin_arm_rsrp || 2476 BuiltinID == ARM::BI__builtin_arm_wsr || 2477 BuiltinID == ARM::BI__builtin_arm_wsrp) 2478 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2479 2480 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2481 return true; 2482 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2483 return true; 2484 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2485 return true; 2486 2487 // For intrinsics which take an immediate value as part of the instruction, 2488 // range check them here. 2489 // FIXME: VFP Intrinsics should error if VFP not present. 
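// Illustrative example (not from the original source): the saturation
// builtins handled below rely on SemaBuiltinConstantArgRange() for their
// immediate operand, e.g.
//   int r = __builtin_arm_ssat(x, 8);   // saturation bit position in [1, 32]: OK
//   int s = __builtin_arm_ssat(x, 0);   // out of range: diagnosed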
2490 switch (BuiltinID) { 2491 default: return false; 2492 case ARM::BI__builtin_arm_ssat: 2493 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2494 case ARM::BI__builtin_arm_usat: 2495 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2496 case ARM::BI__builtin_arm_ssat16: 2497 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2498 case ARM::BI__builtin_arm_usat16: 2499 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2500 case ARM::BI__builtin_arm_vcvtr_f: 2501 case ARM::BI__builtin_arm_vcvtr_d: 2502 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2503 case ARM::BI__builtin_arm_dmb: 2504 case ARM::BI__builtin_arm_dsb: 2505 case ARM::BI__builtin_arm_isb: 2506 case ARM::BI__builtin_arm_dbg: 2507 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2508 case ARM::BI__builtin_arm_cdp: 2509 case ARM::BI__builtin_arm_cdp2: 2510 case ARM::BI__builtin_arm_mcr: 2511 case ARM::BI__builtin_arm_mcr2: 2512 case ARM::BI__builtin_arm_mrc: 2513 case ARM::BI__builtin_arm_mrc2: 2514 case ARM::BI__builtin_arm_mcrr: 2515 case ARM::BI__builtin_arm_mcrr2: 2516 case ARM::BI__builtin_arm_mrrc: 2517 case ARM::BI__builtin_arm_mrrc2: 2518 case ARM::BI__builtin_arm_ldc: 2519 case ARM::BI__builtin_arm_ldcl: 2520 case ARM::BI__builtin_arm_ldc2: 2521 case ARM::BI__builtin_arm_ldc2l: 2522 case ARM::BI__builtin_arm_stc: 2523 case ARM::BI__builtin_arm_stcl: 2524 case ARM::BI__builtin_arm_stc2: 2525 case ARM::BI__builtin_arm_stc2l: 2526 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2527 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2528 /*WantCDE*/ false); 2529 } 2530 } 2531 2532 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2533 unsigned BuiltinID, 2534 CallExpr *TheCall) { 2535 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2536 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2537 BuiltinID == AArch64::BI__builtin_arm_strex || 2538 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2539 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2540 } 2541 2542 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2543 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2544 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2545 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2546 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2547 } 2548 2549 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2550 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2551 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2552 2553 // Memory Tagging Extensions (MTE) Intrinsics 2554 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2555 BuiltinID == AArch64::BI__builtin_arm_addg || 2556 BuiltinID == AArch64::BI__builtin_arm_gmi || 2557 BuiltinID == AArch64::BI__builtin_arm_ldg || 2558 BuiltinID == AArch64::BI__builtin_arm_stg || 2559 BuiltinID == AArch64::BI__builtin_arm_subp) { 2560 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2561 } 2562 2563 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2564 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2565 BuiltinID == AArch64::BI__builtin_arm_wsr || 2566 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2567 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2568 2569 // Only check the valid encoding range. Any constant in this range would be 2570 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2571 // an exception for incorrect registers. This matches MSVC behavior. 
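// Illustrative example (not from the original source): the range check below
// means a call such as _WriteStatusReg(0x8000, Val) is diagnosed because the
// register operand must be a constant in [0, 0x7fff], while any constant in
// that range is accepted even if it does not name an architected register.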
2572 if (BuiltinID == AArch64::BI_ReadStatusReg || 2573 BuiltinID == AArch64::BI_WriteStatusReg) 2574 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2575 2576 if (BuiltinID == AArch64::BI__getReg) 2577 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2578 2579 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2580 return true; 2581 2582 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2583 return true; 2584 2585 // For intrinsics which take an immediate value as part of the instruction, 2586 // range check them here. 2587 unsigned i = 0, l = 0, u = 0; 2588 switch (BuiltinID) { 2589 default: return false; 2590 case AArch64::BI__builtin_arm_dmb: 2591 case AArch64::BI__builtin_arm_dsb: 2592 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2593 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2594 } 2595 2596 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2597 } 2598 2599 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2600 if (Arg->getType()->getAsPlaceholderType()) 2601 return false; 2602 2603 // The first argument needs to be a record field access. 2604 // If it is an array element access, we delay decision 2605 // to BPF backend to check whether the access is a 2606 // field access or not. 2607 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2608 dyn_cast<MemberExpr>(Arg->IgnoreParens()) || 2609 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens())); 2610 } 2611 2612 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2613 QualType VectorTy, QualType EltTy) { 2614 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2615 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2616 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2617 << Call->getSourceRange() << VectorEltTy << EltTy; 2618 return false; 2619 } 2620 return true; 2621 } 2622 2623 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2624 QualType ArgType = Arg->getType(); 2625 if (ArgType->getAsPlaceholderType()) 2626 return false; 2627 2628 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2629 // format: 2630 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2631 // 2. <type> var; 2632 // __builtin_preserve_type_info(var, flag); 2633 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) && 2634 !dyn_cast<UnaryOperator>(Arg->IgnoreParens())) 2635 return false; 2636 2637 // Typedef type. 2638 if (ArgType->getAs<TypedefType>()) 2639 return true; 2640 2641 // Record type or Enum type. 
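// Note (illustrative): only named record or enum types pass the check below;
// e.g. given `struct S v;`, __builtin_preserve_type_info(v, 0) is accepted,
// whereas the same call on a variable of an anonymous struct type is not,
// because that declaration has an empty DeclarationName.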
2642 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2643 if (const auto *RT = Ty->getAs<RecordType>()) { 2644 if (!RT->getDecl()->getDeclName().isEmpty()) 2645 return true; 2646 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2647 if (!ET->getDecl()->getDeclName().isEmpty()) 2648 return true; 2649 } 2650 2651 return false; 2652 } 2653 2654 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2655 QualType ArgType = Arg->getType(); 2656 if (ArgType->getAsPlaceholderType()) 2657 return false; 2658 2659 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2660 // format: 2661 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2662 // flag); 2663 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2664 if (!UO) 2665 return false; 2666 2667 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2668 if (!CE) 2669 return false; 2670 if (CE->getCastKind() != CK_IntegralToPointer && 2671 CE->getCastKind() != CK_NullToPointer) 2672 return false; 2673 2674 // The integer must be from an EnumConstantDecl. 2675 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2676 if (!DR) 2677 return false; 2678 2679 const EnumConstantDecl *Enumerator = 2680 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2681 if (!Enumerator) 2682 return false; 2683 2684 // The type must be EnumType. 2685 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2686 const auto *ET = Ty->getAs<EnumType>(); 2687 if (!ET) 2688 return false; 2689 2690 // The enum value must be supported. 2691 for (auto *EDI : ET->getDecl()->enumerators()) { 2692 if (EDI == Enumerator) 2693 return true; 2694 } 2695 2696 return false; 2697 } 2698 2699 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2700 CallExpr *TheCall) { 2701 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2702 BuiltinID == BPF::BI__builtin_btf_type_id || 2703 BuiltinID == BPF::BI__builtin_preserve_type_info || 2704 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2705 "unexpected BPF builtin"); 2706 2707 if (checkArgCount(*this, TheCall, 2)) 2708 return true; 2709 2710 // The second argument needs to be a constant int 2711 Expr *Arg = TheCall->getArg(1); 2712 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2713 diag::kind kind; 2714 if (!Value) { 2715 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2716 kind = diag::err_preserve_field_info_not_const; 2717 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2718 kind = diag::err_btf_type_id_not_const; 2719 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2720 kind = diag::err_preserve_type_info_not_const; 2721 else 2722 kind = diag::err_preserve_enum_value_not_const; 2723 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2724 return true; 2725 } 2726 2727 // The first argument 2728 Arg = TheCall->getArg(0); 2729 bool InvalidArg = false; 2730 bool ReturnUnsignedInt = true; 2731 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2732 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2733 InvalidArg = true; 2734 kind = diag::err_preserve_field_info_not_field; 2735 } 2736 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2737 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2738 InvalidArg = true; 2739 kind = diag::err_preserve_type_info_invalid; 2740 } 2741 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2742 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2743 InvalidArg = true; 2744 kind = diag::err_preserve_enum_value_invalid; 2745 } 2746 ReturnUnsignedInt = false; 2747 } else if (BuiltinID 
== BPF::BI__builtin_btf_type_id) { 2748 ReturnUnsignedInt = false; 2749 } 2750 2751 if (InvalidArg) { 2752 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 2753 return true; 2754 } 2755 2756 if (ReturnUnsignedInt) 2757 TheCall->setType(Context.UnsignedIntTy); 2758 else 2759 TheCall->setType(Context.UnsignedLongTy); 2760 return false; 2761 } 2762 2763 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2764 struct ArgInfo { 2765 uint8_t OpNum; 2766 bool IsSigned; 2767 uint8_t BitWidth; 2768 uint8_t Align; 2769 }; 2770 struct BuiltinInfo { 2771 unsigned BuiltinID; 2772 ArgInfo Infos[2]; 2773 }; 2774 2775 static BuiltinInfo Infos[] = { 2776 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2777 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2778 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2779 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2780 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2781 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2782 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2783 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2784 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2785 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2786 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2787 2788 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2799 2800 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2818 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, 
false, 7, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2845 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2847 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2849 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2851 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2852 {{ 1, false, 6, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2860 {{ 1, false, 5, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2867 { 2, false, 5, 0 }} }, 2868 { 
Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2869 { 2, false, 6, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2871 { 3, false, 5, 0 }} }, 2872 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2873 { 3, false, 6, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2875 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2878 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2890 {{ 2, false, 4, 0 }, 2891 { 3, false, 5, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2893 {{ 2, false, 4, 0 }, 2894 { 3, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2896 {{ 2, false, 4, 0 }, 2897 { 3, false, 5, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2899 {{ 2, false, 4, 0 }, 2900 { 3, false, 5, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2906 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2911 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2912 { 2, false, 5, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2914 { 2, false, 6, 0 }} }, 2915 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2916 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2917 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2918 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2919 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2921 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2924 {{ 1, false, 4, 0 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, 
false, 4, 0 }} }, 2926 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2927 {{ 1, false, 4, 0 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2930 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2931 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2935 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2936 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2937 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2938 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2939 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2940 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2941 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2942 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2943 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2944 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2945 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2946 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2947 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2948 {{ 3, false, 1, 0 }} }, 2949 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2950 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2951 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2952 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2953 {{ 3, false, 1, 0 }} }, 2954 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2955 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2956 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2957 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2958 {{ 3, false, 1, 0 }} }, 2959 }; 2960 2961 // Use a dynamically initialized static to sort the table exactly once on 2962 // first run. 2963 static const bool SortOnce = 2964 (llvm::sort(Infos, 2965 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2966 return LHS.BuiltinID < RHS.BuiltinID; 2967 }), 2968 true); 2969 (void)SortOnce; 2970 2971 const BuiltinInfo *F = llvm::partition_point( 2972 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2973 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2974 return false; 2975 2976 bool Error = false; 2977 2978 for (const ArgInfo &A : F->Infos) { 2979 // Ignore empty ArgInfo elements. 2980 if (A.BitWidth == 0) 2981 continue; 2982 2983 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2984 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 2985 if (!A.Align) { 2986 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 2987 } else { 2988 unsigned M = 1 << A.Align; 2989 Min *= M; 2990 Max *= M; 2991 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) | 2992 SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 2993 } 2994 } 2995 return Error; 2996 } 2997 2998 bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 2999 CallExpr *TheCall) { 3000 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3001 } 3002 3003 bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI, 3004 unsigned BuiltinID, CallExpr *TheCall) { 3005 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) || 3006 CheckMipsBuiltinArgument(BuiltinID, TheCall); 3007 } 3008 3009 bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, 3010 CallExpr *TheCall) { 3011 3012 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID && 3013 BuiltinID <= Mips::BI__builtin_mips_lwx) { 3014 if (!TI.hasFeature("dsp")) 3015 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp); 3016 } 3017 3018 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID && 3019 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) { 3020 if (!TI.hasFeature("dspr2")) 3021 return Diag(TheCall->getBeginLoc(), 3022 diag::err_mips_builtin_requires_dspr2); 3023 } 3024 3025 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID && 3026 BuiltinID <= Mips::BI__builtin_msa_xori_b) { 3027 if (!TI.hasFeature("msa")) 3028 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa); 3029 } 3030 3031 return false; 3032 } 3033 3034 // CheckMipsBuiltinArgument - Checks the constant value passed to the 3035 // intrinsic is correct. The switch statement is ordered by DSP, MSA. The 3036 // ordering for DSP is unspecified. MSA is ordered by the data format used 3037 // by the underlying instruction i.e., df/m, df/n and then by size. 3038 // 3039 // FIXME: The size tests here should instead be tablegen'd along with the 3040 // definitions from include/clang/Basic/BuiltinsMips.def. 3041 // FIXME: GCC is strict on signedness for some of these intrinsics, we should 3042 // be too. 3043 bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3044 unsigned i = 0, l = 0, u = 0, m = 0; 3045 switch (BuiltinID) { 3046 default: return false; 3047 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; 3048 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; 3049 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; 3050 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; 3051 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; 3052 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; 3053 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; 3054 // MSA intrinsics. Instructions (which the intrinsics maps to) which use the 3055 // df/m field. 3056 // These intrinsics take an unsigned 3 bit immediate. 
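// For example (illustrative, not an exhaustive list), a call such as
// __builtin_msa_slli_b(v, 7) passes this check, while __builtin_msa_slli_b(v, 8)
// is rejected because the shift amount must fit in the 3-bit immediate (0..7).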
3057 case Mips::BI__builtin_msa_bclri_b: 3058 case Mips::BI__builtin_msa_bnegi_b: 3059 case Mips::BI__builtin_msa_bseti_b: 3060 case Mips::BI__builtin_msa_sat_s_b: 3061 case Mips::BI__builtin_msa_sat_u_b: 3062 case Mips::BI__builtin_msa_slli_b: 3063 case Mips::BI__builtin_msa_srai_b: 3064 case Mips::BI__builtin_msa_srari_b: 3065 case Mips::BI__builtin_msa_srli_b: 3066 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3067 case Mips::BI__builtin_msa_binsli_b: 3068 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3069 // These intrinsics take an unsigned 4 bit immediate. 3070 case Mips::BI__builtin_msa_bclri_h: 3071 case Mips::BI__builtin_msa_bnegi_h: 3072 case Mips::BI__builtin_msa_bseti_h: 3073 case Mips::BI__builtin_msa_sat_s_h: 3074 case Mips::BI__builtin_msa_sat_u_h: 3075 case Mips::BI__builtin_msa_slli_h: 3076 case Mips::BI__builtin_msa_srai_h: 3077 case Mips::BI__builtin_msa_srari_h: 3078 case Mips::BI__builtin_msa_srli_h: 3079 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3080 case Mips::BI__builtin_msa_binsli_h: 3081 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3082 // These intrinsics take an unsigned 5 bit immediate. 3083 // The first block of intrinsics actually have an unsigned 5 bit field, 3084 // not a df/n field. 3085 case Mips::BI__builtin_msa_cfcmsa: 3086 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3087 case Mips::BI__builtin_msa_clei_u_b: 3088 case Mips::BI__builtin_msa_clei_u_h: 3089 case Mips::BI__builtin_msa_clei_u_w: 3090 case Mips::BI__builtin_msa_clei_u_d: 3091 case Mips::BI__builtin_msa_clti_u_b: 3092 case Mips::BI__builtin_msa_clti_u_h: 3093 case Mips::BI__builtin_msa_clti_u_w: 3094 case Mips::BI__builtin_msa_clti_u_d: 3095 case Mips::BI__builtin_msa_maxi_u_b: 3096 case Mips::BI__builtin_msa_maxi_u_h: 3097 case Mips::BI__builtin_msa_maxi_u_w: 3098 case Mips::BI__builtin_msa_maxi_u_d: 3099 case Mips::BI__builtin_msa_mini_u_b: 3100 case Mips::BI__builtin_msa_mini_u_h: 3101 case Mips::BI__builtin_msa_mini_u_w: 3102 case Mips::BI__builtin_msa_mini_u_d: 3103 case Mips::BI__builtin_msa_addvi_b: 3104 case Mips::BI__builtin_msa_addvi_h: 3105 case Mips::BI__builtin_msa_addvi_w: 3106 case Mips::BI__builtin_msa_addvi_d: 3107 case Mips::BI__builtin_msa_bclri_w: 3108 case Mips::BI__builtin_msa_bnegi_w: 3109 case Mips::BI__builtin_msa_bseti_w: 3110 case Mips::BI__builtin_msa_sat_s_w: 3111 case Mips::BI__builtin_msa_sat_u_w: 3112 case Mips::BI__builtin_msa_slli_w: 3113 case Mips::BI__builtin_msa_srai_w: 3114 case Mips::BI__builtin_msa_srari_w: 3115 case Mips::BI__builtin_msa_srli_w: 3116 case Mips::BI__builtin_msa_srlri_w: 3117 case Mips::BI__builtin_msa_subvi_b: 3118 case Mips::BI__builtin_msa_subvi_h: 3119 case Mips::BI__builtin_msa_subvi_w: 3120 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3121 case Mips::BI__builtin_msa_binsli_w: 3122 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3123 // These intrinsics take an unsigned 6 bit immediate. 
3124 case Mips::BI__builtin_msa_bclri_d: 3125 case Mips::BI__builtin_msa_bnegi_d: 3126 case Mips::BI__builtin_msa_bseti_d: 3127 case Mips::BI__builtin_msa_sat_s_d: 3128 case Mips::BI__builtin_msa_sat_u_d: 3129 case Mips::BI__builtin_msa_slli_d: 3130 case Mips::BI__builtin_msa_srai_d: 3131 case Mips::BI__builtin_msa_srari_d: 3132 case Mips::BI__builtin_msa_srli_d: 3133 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3134 case Mips::BI__builtin_msa_binsli_d: 3135 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3136 // These intrinsics take a signed 5 bit immediate. 3137 case Mips::BI__builtin_msa_ceqi_b: 3138 case Mips::BI__builtin_msa_ceqi_h: 3139 case Mips::BI__builtin_msa_ceqi_w: 3140 case Mips::BI__builtin_msa_ceqi_d: 3141 case Mips::BI__builtin_msa_clti_s_b: 3142 case Mips::BI__builtin_msa_clti_s_h: 3143 case Mips::BI__builtin_msa_clti_s_w: 3144 case Mips::BI__builtin_msa_clti_s_d: 3145 case Mips::BI__builtin_msa_clei_s_b: 3146 case Mips::BI__builtin_msa_clei_s_h: 3147 case Mips::BI__builtin_msa_clei_s_w: 3148 case Mips::BI__builtin_msa_clei_s_d: 3149 case Mips::BI__builtin_msa_maxi_s_b: 3150 case Mips::BI__builtin_msa_maxi_s_h: 3151 case Mips::BI__builtin_msa_maxi_s_w: 3152 case Mips::BI__builtin_msa_maxi_s_d: 3153 case Mips::BI__builtin_msa_mini_s_b: 3154 case Mips::BI__builtin_msa_mini_s_h: 3155 case Mips::BI__builtin_msa_mini_s_w: 3156 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3157 // These intrinsics take an unsigned 8 bit immediate. 3158 case Mips::BI__builtin_msa_andi_b: 3159 case Mips::BI__builtin_msa_nori_b: 3160 case Mips::BI__builtin_msa_ori_b: 3161 case Mips::BI__builtin_msa_shf_b: 3162 case Mips::BI__builtin_msa_shf_h: 3163 case Mips::BI__builtin_msa_shf_w: 3164 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3165 case Mips::BI__builtin_msa_bseli_b: 3166 case Mips::BI__builtin_msa_bmnzi_b: 3167 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3168 // df/n format 3169 // These intrinsics take an unsigned 4 bit immediate. 3170 case Mips::BI__builtin_msa_copy_s_b: 3171 case Mips::BI__builtin_msa_copy_u_b: 3172 case Mips::BI__builtin_msa_insve_b: 3173 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3174 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3175 // These intrinsics take an unsigned 3 bit immediate. 3176 case Mips::BI__builtin_msa_copy_s_h: 3177 case Mips::BI__builtin_msa_copy_u_h: 3178 case Mips::BI__builtin_msa_insve_h: 3179 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3180 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3181 // These intrinsics take an unsigned 2 bit immediate. 3182 case Mips::BI__builtin_msa_copy_s_w: 3183 case Mips::BI__builtin_msa_copy_u_w: 3184 case Mips::BI__builtin_msa_insve_w: 3185 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3186 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3187 // These intrinsics take an unsigned 1 bit immediate. 3188 case Mips::BI__builtin_msa_copy_s_d: 3189 case Mips::BI__builtin_msa_copy_u_d: 3190 case Mips::BI__builtin_msa_insve_d: 3191 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3192 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3193 // Memory offsets and immediate loads. 3194 // These intrinsics take a signed 10 bit immediate. 
3195 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3196 case Mips::BI__builtin_msa_ldi_h: 3197 case Mips::BI__builtin_msa_ldi_w: 3198 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3199 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3200 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3201 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3202 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3203 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3204 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3205 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3206 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3207 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3208 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3209 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3210 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3211 } 3212 3213 if (!m) 3214 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3215 3216 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3217 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3218 } 3219 3220 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3221 /// advancing the pointer over the consumed characters. The decoded type is 3222 /// returned. If the decoded type represents a constant integer with a 3223 /// constraint on its value then Mask is set to that value. The type descriptors 3224 /// used in Str are specific to PPC MMA builtins and are documented in the file 3225 /// defining the PPC builtins. 3226 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3227 unsigned &Mask) { 3228 bool RequireICE = false; 3229 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3230 switch (*Str++) { 3231 case 'V': 3232 return Context.getVectorType(Context.UnsignedCharTy, 16, 3233 VectorType::VectorKind::AltiVecVector); 3234 case 'i': { 3235 char *End; 3236 unsigned size = strtoul(Str, &End, 10); 3237 assert(End != Str && "Missing constant parameter constraint"); 3238 Str = End; 3239 Mask = size; 3240 return Context.IntTy; 3241 } 3242 case 'W': { 3243 char *End; 3244 unsigned size = strtoul(Str, &End, 10); 3245 assert(End != Str && "Missing PowerPC MMA type size"); 3246 Str = End; 3247 QualType Type; 3248 switch (size) { 3249 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3250 case size: Type = Context.Id##Ty; break; 3251 #include "clang/Basic/PPCTypes.def" 3252 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3253 } 3254 bool CheckVectorArgs = false; 3255 while (!CheckVectorArgs) { 3256 switch (*Str++) { 3257 case '*': 3258 Type = Context.getPointerType(Type); 3259 break; 3260 case 'C': 3261 Type = Type.withConst(); 3262 break; 3263 default: 3264 CheckVectorArgs = true; 3265 --Str; 3266 break; 3267 } 3268 } 3269 return Type; 3270 } 3271 default: 3272 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3273 } 3274 } 3275 3276 static bool isPPC_64Builtin(unsigned BuiltinID) { 3277 // These builtins only work on PPC 64bit targets. 
3278 switch (BuiltinID) { 3279 case PPC::BI__builtin_divde: 3280 case PPC::BI__builtin_divdeu: 3281 case PPC::BI__builtin_bpermd: 3282 case PPC::BI__builtin_ppc_ldarx: 3283 case PPC::BI__builtin_ppc_stdcx: 3284 case PPC::BI__builtin_ppc_tdw: 3285 case PPC::BI__builtin_ppc_trapd: 3286 case PPC::BI__builtin_ppc_cmpeqb: 3287 case PPC::BI__builtin_ppc_setb: 3288 case PPC::BI__builtin_ppc_mulhd: 3289 case PPC::BI__builtin_ppc_mulhdu: 3290 case PPC::BI__builtin_ppc_maddhd: 3291 case PPC::BI__builtin_ppc_maddhdu: 3292 case PPC::BI__builtin_ppc_maddld: 3293 case PPC::BI__builtin_ppc_load8r: 3294 case PPC::BI__builtin_ppc_store8r: 3295 case PPC::BI__builtin_ppc_insert_exp: 3296 case PPC::BI__builtin_ppc_extract_sig: 3297 case PPC::BI__builtin_ppc_addex: 3298 case PPC::BI__builtin_darn: 3299 case PPC::BI__builtin_darn_raw: 3300 return true; 3301 } 3302 return false; 3303 } 3304 3305 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3306 StringRef FeatureToCheck, unsigned DiagID, 3307 StringRef DiagArg = "") { 3308 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3309 return false; 3310 3311 if (DiagArg.empty()) 3312 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3313 else 3314 S.Diag(TheCall->getBeginLoc(), DiagID) 3315 << DiagArg << TheCall->getSourceRange(); 3316 3317 return true; 3318 } 3319 3320 /// Returns true if the argument consists of one contiguous run of 1s with any 3321 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3322 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3323 /// since all 1s are not contiguous. 3324 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3325 llvm::APSInt Result; 3326 // We can't check the value of a dependent argument. 3327 Expr *Arg = TheCall->getArg(ArgNum); 3328 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3329 return false; 3330 3331 // Check constant-ness first. 3332 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3333 return true; 3334 3335 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 
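// A wrapped run such as 0xFF0000FF is not itself a shifted mask, but its
// complement (0x00FFFF00) is, so testing both Result and ~Result accepts
// wrapped runs; a value like 0x0F0F0000 fails both tests and is diagnosed.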
3336 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3337 return false; 3338 3339 return Diag(TheCall->getBeginLoc(), 3340 diag::err_argument_not_contiguous_bit_field) 3341 << ArgNum << Arg->getSourceRange(); 3342 } 3343 3344 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3345 CallExpr *TheCall) { 3346 unsigned i = 0, l = 0, u = 0; 3347 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3348 llvm::APSInt Result; 3349 3350 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3351 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3352 << TheCall->getSourceRange(); 3353 3354 switch (BuiltinID) { 3355 default: return false; 3356 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3357 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3358 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3359 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3360 case PPC::BI__builtin_altivec_dss: 3361 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3362 case PPC::BI__builtin_tbegin: 3363 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3364 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3365 case PPC::BI__builtin_tabortwc: 3366 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3367 case PPC::BI__builtin_tabortwci: 3368 case PPC::BI__builtin_tabortdci: 3369 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3370 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3371 case PPC::BI__builtin_altivec_dst: 3372 case PPC::BI__builtin_altivec_dstt: 3373 case PPC::BI__builtin_altivec_dstst: 3374 case PPC::BI__builtin_altivec_dststt: 3375 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3376 case PPC::BI__builtin_vsx_xxpermdi: 3377 case PPC::BI__builtin_vsx_xxsldwi: 3378 return SemaBuiltinVSX(TheCall); 3379 case PPC::BI__builtin_divwe: 3380 case PPC::BI__builtin_divweu: 3381 case PPC::BI__builtin_divde: 3382 case PPC::BI__builtin_divdeu: 3383 return SemaFeatureCheck(*this, TheCall, "extdiv", 3384 diag::err_ppc_builtin_only_on_arch, "7"); 3385 case PPC::BI__builtin_bpermd: 3386 return SemaFeatureCheck(*this, TheCall, "bpermd", 3387 diag::err_ppc_builtin_only_on_arch, "7"); 3388 case PPC::BI__builtin_unpack_vector_int128: 3389 return SemaFeatureCheck(*this, TheCall, "vsx", 3390 diag::err_ppc_builtin_only_on_arch, "7") || 3391 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3392 case PPC::BI__builtin_pack_vector_int128: 3393 return SemaFeatureCheck(*this, TheCall, "vsx", 3394 diag::err_ppc_builtin_only_on_arch, "7"); 3395 case PPC::BI__builtin_altivec_vgnb: 3396 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3397 case PPC::BI__builtin_altivec_vec_replace_elt: 3398 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3399 QualType VecTy = TheCall->getArg(0)->getType(); 3400 QualType EltTy = TheCall->getArg(1)->getType(); 3401 unsigned Width = Context.getIntWidth(EltTy); 3402 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 
12 : 8) || 3403 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3404 } 3405 case PPC::BI__builtin_vsx_xxeval: 3406 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3407 case PPC::BI__builtin_altivec_vsldbi: 3408 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3409 case PPC::BI__builtin_altivec_vsrdbi: 3410 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3411 case PPC::BI__builtin_vsx_xxpermx: 3412 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3413 case PPC::BI__builtin_ppc_tw: 3414 case PPC::BI__builtin_ppc_tdw: 3415 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 3416 case PPC::BI__builtin_ppc_cmpeqb: 3417 case PPC::BI__builtin_ppc_setb: 3418 case PPC::BI__builtin_ppc_maddhd: 3419 case PPC::BI__builtin_ppc_maddhdu: 3420 case PPC::BI__builtin_ppc_maddld: 3421 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3422 diag::err_ppc_builtin_only_on_arch, "9"); 3423 case PPC::BI__builtin_ppc_cmprb: 3424 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3425 diag::err_ppc_builtin_only_on_arch, "9") || 3426 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 3427 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 3428 // be a constant that represents a contiguous bit field. 3429 case PPC::BI__builtin_ppc_rlwnm: 3430 return SemaBuiltinConstantArg(TheCall, 1, Result) || 3431 SemaValueIsRunOfOnes(TheCall, 2); 3432 case PPC::BI__builtin_ppc_rlwimi: 3433 case PPC::BI__builtin_ppc_rldimi: 3434 return SemaBuiltinConstantArg(TheCall, 2, Result) || 3435 SemaValueIsRunOfOnes(TheCall, 3); 3436 case PPC::BI__builtin_ppc_extract_exp: 3437 case PPC::BI__builtin_ppc_extract_sig: 3438 case PPC::BI__builtin_ppc_insert_exp: 3439 return SemaFeatureCheck(*this, TheCall, "power9-vector", 3440 diag::err_ppc_builtin_only_on_arch, "9"); 3441 case PPC::BI__builtin_ppc_addex: { 3442 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3443 diag::err_ppc_builtin_only_on_arch, "9") || 3444 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 3445 return true; 3446 // Output warning for reserved values 1 to 3. 
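// For example (illustrative only), __builtin_ppc_addex(a, b, 0) is the only
// well-defined form; immediates 1 to 3 pass the range check but are reported
// with warn_argument_undefined_behaviour below.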
3447 int ArgValue = 3448 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 3449 if (ArgValue != 0) 3450 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 3451 << ArgValue; 3452 return false; 3453 } 3454 case PPC::BI__builtin_ppc_mtfsb0: 3455 case PPC::BI__builtin_ppc_mtfsb1: 3456 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3457 case PPC::BI__builtin_ppc_mtfsf: 3458 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 3459 case PPC::BI__builtin_ppc_mtfsfi: 3460 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 3461 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 3462 case PPC::BI__builtin_ppc_alignx: 3463 return SemaBuiltinConstantArgPower2(TheCall, 0); 3464 case PPC::BI__builtin_ppc_rdlam: 3465 return SemaValueIsRunOfOnes(TheCall, 2); 3466 case PPC::BI__builtin_ppc_icbt: 3467 case PPC::BI__builtin_ppc_sthcx: 3468 case PPC::BI__builtin_ppc_stbcx: 3469 case PPC::BI__builtin_ppc_lharx: 3470 case PPC::BI__builtin_ppc_lbarx: 3471 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3472 diag::err_ppc_builtin_only_on_arch, "8"); 3473 case PPC::BI__builtin_vsx_ldrmb: 3474 case PPC::BI__builtin_vsx_strmb: 3475 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 3476 diag::err_ppc_builtin_only_on_arch, "8") || 3477 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 3478 case PPC::BI__builtin_altivec_vcntmbb: 3479 case PPC::BI__builtin_altivec_vcntmbh: 3480 case PPC::BI__builtin_altivec_vcntmbw: 3481 case PPC::BI__builtin_altivec_vcntmbd: 3482 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3483 case PPC::BI__builtin_darn: 3484 case PPC::BI__builtin_darn_raw: 3485 case PPC::BI__builtin_darn_32: 3486 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3487 diag::err_ppc_builtin_only_on_arch, "9"); 3488 case PPC::BI__builtin_vsx_xxgenpcvbm: 3489 case PPC::BI__builtin_vsx_xxgenpcvhm: 3490 case PPC::BI__builtin_vsx_xxgenpcvwm: 3491 case PPC::BI__builtin_vsx_xxgenpcvdm: 3492 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3493 case PPC::BI__builtin_ppc_compare_exp_uo: 3494 case PPC::BI__builtin_ppc_compare_exp_lt: 3495 case PPC::BI__builtin_ppc_compare_exp_gt: 3496 case PPC::BI__builtin_ppc_compare_exp_eq: 3497 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3498 diag::err_ppc_builtin_only_on_arch, "9") || 3499 SemaFeatureCheck(*this, TheCall, "vsx", 3500 diag::err_ppc_builtin_requires_vsx); 3501 case PPC::BI__builtin_ppc_test_data_class: { 3502 // Check if the first argument of the __builtin_ppc_test_data_class call is 3503 // valid. The argument must be either a 'float' or a 'double'. 
    QualType ArgType = TheCall->getArg(0)->getType();
    if (ArgType != QualType(Context.FloatTy) &&
        ArgType != QualType(Context.DoubleTy))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_ppc_invalid_test_data_class_type);
    return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
                            diag::err_ppc_builtin_only_on_arch, "9") ||
           SemaFeatureCheck(*this, TheCall, "vsx",
                            diag::err_ppc_builtin_requires_vsx) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
  }
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
    return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
                            diag::err_ppc_builtin_only_on_arch, "7");
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Position of the memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of the memory ordering as per C11 / C++11's memory model.
  // Only fence needs the check; atomic dec/inc allow all memory orders.
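  // For example (illustrative only), __builtin_amdgcn_fence(__ATOMIC_RELAXED,
  // "workgroup") is diagnosed below, while acquire, release, acq_rel and
  // seq_cst orderings are accepted for the fence builtin.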
3575 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3576 return Diag(ArgExpr->getBeginLoc(), 3577 diag::warn_atomic_op_has_invalid_memory_order) 3578 << ArgExpr->getSourceRange(); 3579 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3580 case llvm::AtomicOrderingCABI::relaxed: 3581 case llvm::AtomicOrderingCABI::consume: 3582 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3583 return Diag(ArgExpr->getBeginLoc(), 3584 diag::warn_atomic_op_has_invalid_memory_order) 3585 << ArgExpr->getSourceRange(); 3586 break; 3587 case llvm::AtomicOrderingCABI::acquire: 3588 case llvm::AtomicOrderingCABI::release: 3589 case llvm::AtomicOrderingCABI::acq_rel: 3590 case llvm::AtomicOrderingCABI::seq_cst: 3591 break; 3592 } 3593 3594 Arg = TheCall->getArg(ScopeIndex); 3595 ArgExpr = Arg.get(); 3596 Expr::EvalResult ArgResult1; 3597 // Check that sync scope is a constant literal 3598 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3599 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3600 << ArgExpr->getType(); 3601 3602 return false; 3603 } 3604 3605 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3606 llvm::APSInt Result; 3607 3608 // We can't check the value of a dependent argument. 3609 Expr *Arg = TheCall->getArg(ArgNum); 3610 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3611 return false; 3612 3613 // Check constant-ness first. 3614 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3615 return true; 3616 3617 int64_t Val = Result.getSExtValue(); 3618 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3619 return false; 3620 3621 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3622 << Arg->getSourceRange(); 3623 } 3624 3625 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3626 unsigned BuiltinID, 3627 CallExpr *TheCall) { 3628 // CodeGenFunction can also detect this, but this gives a better error 3629 // message. 3630 bool FeatureMissing = false; 3631 SmallVector<StringRef> ReqFeatures; 3632 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3633 Features.split(ReqFeatures, ','); 3634 3635 // Check if each required feature is included 3636 for (StringRef F : ReqFeatures) { 3637 if (TI.hasFeature(F)) 3638 continue; 3639 3640 // If the feature is 64bit, alter the string so it will print better in 3641 // the diagnostic. 3642 if (F == "64bit") 3643 F = "RV64"; 3644 3645 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 
3646 F.consume_front("experimental-"); 3647 std::string FeatureStr = F.str(); 3648 FeatureStr[0] = std::toupper(FeatureStr[0]); 3649 3650 // Error message 3651 FeatureMissing = true; 3652 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 3653 << TheCall->getSourceRange() << StringRef(FeatureStr); 3654 } 3655 3656 if (FeatureMissing) 3657 return true; 3658 3659 switch (BuiltinID) { 3660 case RISCV::BI__builtin_rvv_vsetvli: 3661 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 3662 CheckRISCVLMUL(TheCall, 2); 3663 case RISCV::BI__builtin_rvv_vsetvlimax: 3664 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 3665 CheckRISCVLMUL(TheCall, 1); 3666 case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1: 3667 case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1: 3668 case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1: 3669 case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1: 3670 case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1: 3671 case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1: 3672 case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1: 3673 case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1: 3674 case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1: 3675 case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1: 3676 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2: 3677 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2: 3678 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2: 3679 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2: 3680 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2: 3681 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2: 3682 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2: 3683 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2: 3684 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2: 3685 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2: 3686 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4: 3687 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4: 3688 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4: 3689 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4: 3690 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4: 3691 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4: 3692 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4: 3693 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4: 3694 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4: 3695 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4: 3696 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3697 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1: 3698 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1: 3699 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1: 3700 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1: 3701 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1: 3702 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1: 3703 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1: 3704 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1: 3705 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1: 3706 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1: 3707 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2: 3708 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2: 3709 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2: 3710 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2: 3711 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2: 3712 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2: 3713 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2: 3714 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2: 3715 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2: 3716 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2: 3717 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3718 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1: 3719 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1: 3720 case 
RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1: 3721 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1: 3722 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1: 3723 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1: 3724 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1: 3725 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1: 3726 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1: 3727 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1: 3728 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 3729 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2: 3730 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2: 3731 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2: 3732 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2: 3733 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2: 3734 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2: 3735 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2: 3736 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2: 3737 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2: 3738 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2: 3739 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4: 3740 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4: 3741 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4: 3742 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4: 3743 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4: 3744 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4: 3745 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4: 3746 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4: 3747 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4: 3748 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4: 3749 case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8: 3750 case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8: 3751 case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8: 3752 case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8: 3753 case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8: 3754 case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8: 3755 case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8: 3756 case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8: 3757 case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8: 3758 case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8: 3759 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3760 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4: 3761 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4: 3762 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4: 3763 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4: 3764 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4: 3765 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4: 3766 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4: 3767 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4: 3768 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4: 3769 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4: 3770 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8: 3771 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8: 3772 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8: 3773 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8: 3774 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8: 3775 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8: 3776 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8: 3777 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8: 3778 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8: 3779 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8: 3780 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3781 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8: 3782 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8: 3783 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8: 3784 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8: 3785 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8: 3786 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8: 3787 case 
RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8: 3788 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8: 3789 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8: 3790 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8: 3791 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 3792 } 3793 3794 return false; 3795 } 3796 3797 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3798 CallExpr *TheCall) { 3799 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3800 Expr *Arg = TheCall->getArg(0); 3801 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 3802 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 3803 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3804 << Arg->getSourceRange(); 3805 } 3806 3807 // For intrinsics which take an immediate value as part of the instruction, 3808 // range check them here. 3809 unsigned i = 0, l = 0, u = 0; 3810 switch (BuiltinID) { 3811 default: return false; 3812 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3813 case SystemZ::BI__builtin_s390_verimb: 3814 case SystemZ::BI__builtin_s390_verimh: 3815 case SystemZ::BI__builtin_s390_verimf: 3816 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3817 case SystemZ::BI__builtin_s390_vfaeb: 3818 case SystemZ::BI__builtin_s390_vfaeh: 3819 case SystemZ::BI__builtin_s390_vfaef: 3820 case SystemZ::BI__builtin_s390_vfaebs: 3821 case SystemZ::BI__builtin_s390_vfaehs: 3822 case SystemZ::BI__builtin_s390_vfaefs: 3823 case SystemZ::BI__builtin_s390_vfaezb: 3824 case SystemZ::BI__builtin_s390_vfaezh: 3825 case SystemZ::BI__builtin_s390_vfaezf: 3826 case SystemZ::BI__builtin_s390_vfaezbs: 3827 case SystemZ::BI__builtin_s390_vfaezhs: 3828 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3829 case SystemZ::BI__builtin_s390_vfisb: 3830 case SystemZ::BI__builtin_s390_vfidb: 3831 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3832 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3833 case SystemZ::BI__builtin_s390_vftcisb: 3834 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3835 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3836 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3837 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3838 case SystemZ::BI__builtin_s390_vstrcb: 3839 case SystemZ::BI__builtin_s390_vstrch: 3840 case SystemZ::BI__builtin_s390_vstrcf: 3841 case SystemZ::BI__builtin_s390_vstrczb: 3842 case SystemZ::BI__builtin_s390_vstrczh: 3843 case SystemZ::BI__builtin_s390_vstrczf: 3844 case SystemZ::BI__builtin_s390_vstrcbs: 3845 case SystemZ::BI__builtin_s390_vstrchs: 3846 case SystemZ::BI__builtin_s390_vstrcfs: 3847 case SystemZ::BI__builtin_s390_vstrczbs: 3848 case SystemZ::BI__builtin_s390_vstrczhs: 3849 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3850 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3851 case SystemZ::BI__builtin_s390_vfminsb: 3852 case SystemZ::BI__builtin_s390_vfmaxsb: 3853 case SystemZ::BI__builtin_s390_vfmindb: 3854 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3855 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3856 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3857 case SystemZ::BI__builtin_s390_vclfnhs: 3858 case SystemZ::BI__builtin_s390_vclfnls: 3859 case SystemZ::BI__builtin_s390_vcfn: 3860 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 3861 case 
SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 3862 } 3863 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3864 } 3865 3866 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3867 /// This checks that the target supports __builtin_cpu_supports and 3868 /// that the string argument is constant and valid. 3869 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3870 CallExpr *TheCall) { 3871 Expr *Arg = TheCall->getArg(0); 3872 3873 // Check if the argument is a string literal. 3874 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3875 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3876 << Arg->getSourceRange(); 3877 3878 // Check the contents of the string. 3879 StringRef Feature = 3880 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3881 if (!TI.validateCpuSupports(Feature)) 3882 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3883 << Arg->getSourceRange(); 3884 return false; 3885 } 3886 3887 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3888 /// This checks that the target supports __builtin_cpu_is and 3889 /// that the string argument is constant and valid. 3890 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3891 Expr *Arg = TheCall->getArg(0); 3892 3893 // Check if the argument is a string literal. 3894 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3895 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3896 << Arg->getSourceRange(); 3897 3898 // Check the contents of the string. 3899 StringRef Feature = 3900 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3901 if (!TI.validateCpuIs(Feature)) 3902 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3903 << Arg->getSourceRange(); 3904 return false; 3905 } 3906 3907 // Check if the rounding mode is legal. 3908 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3909 // Indicates if this instruction has rounding control or just SAE. 
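// The rounding immediate is normally built from the _MM_FROUND_* macros
// (mentioned here for illustration): the check at the end of this function
// accepts 4 (ROUND_CUR_DIRECTION), 8 (ROUND_NO_EXC), 12 (both together, only
// for instructions without rounding control), or 8 | rc with rc in [0, 3]
// for instructions that do take a rounding-control field.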
3910 bool HasRC = false; 3911 3912 unsigned ArgNum = 0; 3913 switch (BuiltinID) { 3914 default: 3915 return false; 3916 case X86::BI__builtin_ia32_vcvttsd2si32: 3917 case X86::BI__builtin_ia32_vcvttsd2si64: 3918 case X86::BI__builtin_ia32_vcvttsd2usi32: 3919 case X86::BI__builtin_ia32_vcvttsd2usi64: 3920 case X86::BI__builtin_ia32_vcvttss2si32: 3921 case X86::BI__builtin_ia32_vcvttss2si64: 3922 case X86::BI__builtin_ia32_vcvttss2usi32: 3923 case X86::BI__builtin_ia32_vcvttss2usi64: 3924 case X86::BI__builtin_ia32_vcvttsh2si32: 3925 case X86::BI__builtin_ia32_vcvttsh2si64: 3926 case X86::BI__builtin_ia32_vcvttsh2usi32: 3927 case X86::BI__builtin_ia32_vcvttsh2usi64: 3928 ArgNum = 1; 3929 break; 3930 case X86::BI__builtin_ia32_maxpd512: 3931 case X86::BI__builtin_ia32_maxps512: 3932 case X86::BI__builtin_ia32_minpd512: 3933 case X86::BI__builtin_ia32_minps512: 3934 case X86::BI__builtin_ia32_maxph512: 3935 case X86::BI__builtin_ia32_minph512: 3936 ArgNum = 2; 3937 break; 3938 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 3939 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 3940 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3941 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3942 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3943 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3944 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3945 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3946 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3947 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3948 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3949 case X86::BI__builtin_ia32_vcvttph2w512_mask: 3950 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 3951 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 3952 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 3953 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 3954 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 3955 case X86::BI__builtin_ia32_exp2pd_mask: 3956 case X86::BI__builtin_ia32_exp2ps_mask: 3957 case X86::BI__builtin_ia32_getexppd512_mask: 3958 case X86::BI__builtin_ia32_getexpps512_mask: 3959 case X86::BI__builtin_ia32_getexpph512_mask: 3960 case X86::BI__builtin_ia32_rcp28pd_mask: 3961 case X86::BI__builtin_ia32_rcp28ps_mask: 3962 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3963 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3964 case X86::BI__builtin_ia32_vcomisd: 3965 case X86::BI__builtin_ia32_vcomiss: 3966 case X86::BI__builtin_ia32_vcomish: 3967 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3968 ArgNum = 3; 3969 break; 3970 case X86::BI__builtin_ia32_cmppd512_mask: 3971 case X86::BI__builtin_ia32_cmpps512_mask: 3972 case X86::BI__builtin_ia32_cmpsd_mask: 3973 case X86::BI__builtin_ia32_cmpss_mask: 3974 case X86::BI__builtin_ia32_cmpsh_mask: 3975 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 3976 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 3977 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3978 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3979 case X86::BI__builtin_ia32_getexpss128_round_mask: 3980 case X86::BI__builtin_ia32_getexpsh128_round_mask: 3981 case X86::BI__builtin_ia32_getmantpd512_mask: 3982 case X86::BI__builtin_ia32_getmantps512_mask: 3983 case X86::BI__builtin_ia32_getmantph512_mask: 3984 case X86::BI__builtin_ia32_maxsd_round_mask: 3985 case X86::BI__builtin_ia32_maxss_round_mask: 3986 case X86::BI__builtin_ia32_maxsh_round_mask: 3987 case X86::BI__builtin_ia32_minsd_round_mask: 3988 case X86::BI__builtin_ia32_minss_round_mask: 3989 case X86::BI__builtin_ia32_minsh_round_mask: 3990 case X86::BI__builtin_ia32_rcp28sd_round_mask: 
3991 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3992 case X86::BI__builtin_ia32_reducepd512_mask: 3993 case X86::BI__builtin_ia32_reduceps512_mask: 3994 case X86::BI__builtin_ia32_reduceph512_mask: 3995 case X86::BI__builtin_ia32_rndscalepd_mask: 3996 case X86::BI__builtin_ia32_rndscaleps_mask: 3997 case X86::BI__builtin_ia32_rndscaleph_mask: 3998 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3999 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4000 ArgNum = 4; 4001 break; 4002 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4003 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4004 case X86::BI__builtin_ia32_fixupimmps512_mask: 4005 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4006 case X86::BI__builtin_ia32_fixupimmsd_mask: 4007 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4008 case X86::BI__builtin_ia32_fixupimmss_mask: 4009 case X86::BI__builtin_ia32_fixupimmss_maskz: 4010 case X86::BI__builtin_ia32_getmantsd_round_mask: 4011 case X86::BI__builtin_ia32_getmantss_round_mask: 4012 case X86::BI__builtin_ia32_getmantsh_round_mask: 4013 case X86::BI__builtin_ia32_rangepd512_mask: 4014 case X86::BI__builtin_ia32_rangeps512_mask: 4015 case X86::BI__builtin_ia32_rangesd128_round_mask: 4016 case X86::BI__builtin_ia32_rangess128_round_mask: 4017 case X86::BI__builtin_ia32_reducesd_mask: 4018 case X86::BI__builtin_ia32_reducess_mask: 4019 case X86::BI__builtin_ia32_reducesh_mask: 4020 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4021 case X86::BI__builtin_ia32_rndscaless_round_mask: 4022 case X86::BI__builtin_ia32_rndscalesh_round_mask: 4023 ArgNum = 5; 4024 break; 4025 case X86::BI__builtin_ia32_vcvtsd2si64: 4026 case X86::BI__builtin_ia32_vcvtsd2si32: 4027 case X86::BI__builtin_ia32_vcvtsd2usi32: 4028 case X86::BI__builtin_ia32_vcvtsd2usi64: 4029 case X86::BI__builtin_ia32_vcvtss2si32: 4030 case X86::BI__builtin_ia32_vcvtss2si64: 4031 case X86::BI__builtin_ia32_vcvtss2usi32: 4032 case X86::BI__builtin_ia32_vcvtss2usi64: 4033 case X86::BI__builtin_ia32_vcvtsh2si32: 4034 case X86::BI__builtin_ia32_vcvtsh2si64: 4035 case X86::BI__builtin_ia32_vcvtsh2usi32: 4036 case X86::BI__builtin_ia32_vcvtsh2usi64: 4037 case X86::BI__builtin_ia32_sqrtpd512: 4038 case X86::BI__builtin_ia32_sqrtps512: 4039 case X86::BI__builtin_ia32_sqrtph512: 4040 ArgNum = 1; 4041 HasRC = true; 4042 break; 4043 case X86::BI__builtin_ia32_addph512: 4044 case X86::BI__builtin_ia32_divph512: 4045 case X86::BI__builtin_ia32_mulph512: 4046 case X86::BI__builtin_ia32_subph512: 4047 case X86::BI__builtin_ia32_addpd512: 4048 case X86::BI__builtin_ia32_addps512: 4049 case X86::BI__builtin_ia32_divpd512: 4050 case X86::BI__builtin_ia32_divps512: 4051 case X86::BI__builtin_ia32_mulpd512: 4052 case X86::BI__builtin_ia32_mulps512: 4053 case X86::BI__builtin_ia32_subpd512: 4054 case X86::BI__builtin_ia32_subps512: 4055 case X86::BI__builtin_ia32_cvtsi2sd64: 4056 case X86::BI__builtin_ia32_cvtsi2ss32: 4057 case X86::BI__builtin_ia32_cvtsi2ss64: 4058 case X86::BI__builtin_ia32_cvtusi2sd64: 4059 case X86::BI__builtin_ia32_cvtusi2ss32: 4060 case X86::BI__builtin_ia32_cvtusi2ss64: 4061 case X86::BI__builtin_ia32_vcvtusi2sh: 4062 case X86::BI__builtin_ia32_vcvtusi642sh: 4063 case X86::BI__builtin_ia32_vcvtsi2sh: 4064 case X86::BI__builtin_ia32_vcvtsi642sh: 4065 ArgNum = 2; 4066 HasRC = true; 4067 break; 4068 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4069 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4070 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4071 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 4072 case 
X86::BI__builtin_ia32_cvtpd2ps512_mask: 4073 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4074 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4075 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4076 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4077 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4078 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4079 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4080 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4081 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4082 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4083 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4084 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4085 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4086 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4087 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4088 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4089 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4090 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4091 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4092 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4093 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4094 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4095 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4096 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4097 ArgNum = 3; 4098 HasRC = true; 4099 break; 4100 case X86::BI__builtin_ia32_addsh_round_mask: 4101 case X86::BI__builtin_ia32_addss_round_mask: 4102 case X86::BI__builtin_ia32_addsd_round_mask: 4103 case X86::BI__builtin_ia32_divsh_round_mask: 4104 case X86::BI__builtin_ia32_divss_round_mask: 4105 case X86::BI__builtin_ia32_divsd_round_mask: 4106 case X86::BI__builtin_ia32_mulsh_round_mask: 4107 case X86::BI__builtin_ia32_mulss_round_mask: 4108 case X86::BI__builtin_ia32_mulsd_round_mask: 4109 case X86::BI__builtin_ia32_subsh_round_mask: 4110 case X86::BI__builtin_ia32_subss_round_mask: 4111 case X86::BI__builtin_ia32_subsd_round_mask: 4112 case X86::BI__builtin_ia32_scalefph512_mask: 4113 case X86::BI__builtin_ia32_scalefpd512_mask: 4114 case X86::BI__builtin_ia32_scalefps512_mask: 4115 case X86::BI__builtin_ia32_scalefsd_round_mask: 4116 case X86::BI__builtin_ia32_scalefss_round_mask: 4117 case X86::BI__builtin_ia32_scalefsh_round_mask: 4118 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4119 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4120 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4121 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4122 case X86::BI__builtin_ia32_sqrtss_round_mask: 4123 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4124 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4125 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4126 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4127 case X86::BI__builtin_ia32_vfmaddss3_mask: 4128 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4129 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4130 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4131 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4132 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4133 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4134 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4135 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4136 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4137 case X86::BI__builtin_ia32_vfmaddps512_mask: 4138 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4139 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4140 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4141 case X86::BI__builtin_ia32_vfmaddph512_mask: 4142 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4143 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4144 case 
X86::BI__builtin_ia32_vfmsubph512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask:
  case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
  case X86::BI__builtin_ia32_vfmaddcsh_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfmaddcph512_mask:
  case X86::BI__builtin_ia32_vfmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfcmaddcsh_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
  case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask:
  case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
  case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
  case X86::BI__builtin_ia32_vfmulcsh_mask:
  case X86::BI__builtin_ia32_vfmulcph512_mask:
  case X86::BI__builtin_ia32_vfcmulcsh_mask:
  case X86::BI__builtin_ia32_vfcmulcph512_mask:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or the
  // ROUND_NO_EXC bit is set. If the intrinsic has rounding control (bits 1:0),
  // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
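// The scale immediate is the byte multiplier applied to each index element;
// as the check below shows, only the values 1, 2, 4 and 8 are accepted.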
4204 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4205 CallExpr *TheCall) { 4206 unsigned ArgNum = 0; 4207 switch (BuiltinID) { 4208 default: 4209 return false; 4210 case X86::BI__builtin_ia32_gatherpfdpd: 4211 case X86::BI__builtin_ia32_gatherpfdps: 4212 case X86::BI__builtin_ia32_gatherpfqpd: 4213 case X86::BI__builtin_ia32_gatherpfqps: 4214 case X86::BI__builtin_ia32_scatterpfdpd: 4215 case X86::BI__builtin_ia32_scatterpfdps: 4216 case X86::BI__builtin_ia32_scatterpfqpd: 4217 case X86::BI__builtin_ia32_scatterpfqps: 4218 ArgNum = 3; 4219 break; 4220 case X86::BI__builtin_ia32_gatherd_pd: 4221 case X86::BI__builtin_ia32_gatherd_pd256: 4222 case X86::BI__builtin_ia32_gatherq_pd: 4223 case X86::BI__builtin_ia32_gatherq_pd256: 4224 case X86::BI__builtin_ia32_gatherd_ps: 4225 case X86::BI__builtin_ia32_gatherd_ps256: 4226 case X86::BI__builtin_ia32_gatherq_ps: 4227 case X86::BI__builtin_ia32_gatherq_ps256: 4228 case X86::BI__builtin_ia32_gatherd_q: 4229 case X86::BI__builtin_ia32_gatherd_q256: 4230 case X86::BI__builtin_ia32_gatherq_q: 4231 case X86::BI__builtin_ia32_gatherq_q256: 4232 case X86::BI__builtin_ia32_gatherd_d: 4233 case X86::BI__builtin_ia32_gatherd_d256: 4234 case X86::BI__builtin_ia32_gatherq_d: 4235 case X86::BI__builtin_ia32_gatherq_d256: 4236 case X86::BI__builtin_ia32_gather3div2df: 4237 case X86::BI__builtin_ia32_gather3div2di: 4238 case X86::BI__builtin_ia32_gather3div4df: 4239 case X86::BI__builtin_ia32_gather3div4di: 4240 case X86::BI__builtin_ia32_gather3div4sf: 4241 case X86::BI__builtin_ia32_gather3div4si: 4242 case X86::BI__builtin_ia32_gather3div8sf: 4243 case X86::BI__builtin_ia32_gather3div8si: 4244 case X86::BI__builtin_ia32_gather3siv2df: 4245 case X86::BI__builtin_ia32_gather3siv2di: 4246 case X86::BI__builtin_ia32_gather3siv4df: 4247 case X86::BI__builtin_ia32_gather3siv4di: 4248 case X86::BI__builtin_ia32_gather3siv4sf: 4249 case X86::BI__builtin_ia32_gather3siv4si: 4250 case X86::BI__builtin_ia32_gather3siv8sf: 4251 case X86::BI__builtin_ia32_gather3siv8si: 4252 case X86::BI__builtin_ia32_gathersiv8df: 4253 case X86::BI__builtin_ia32_gathersiv16sf: 4254 case X86::BI__builtin_ia32_gatherdiv8df: 4255 case X86::BI__builtin_ia32_gatherdiv16sf: 4256 case X86::BI__builtin_ia32_gathersiv8di: 4257 case X86::BI__builtin_ia32_gathersiv16si: 4258 case X86::BI__builtin_ia32_gatherdiv8di: 4259 case X86::BI__builtin_ia32_gatherdiv16si: 4260 case X86::BI__builtin_ia32_scatterdiv2df: 4261 case X86::BI__builtin_ia32_scatterdiv2di: 4262 case X86::BI__builtin_ia32_scatterdiv4df: 4263 case X86::BI__builtin_ia32_scatterdiv4di: 4264 case X86::BI__builtin_ia32_scatterdiv4sf: 4265 case X86::BI__builtin_ia32_scatterdiv4si: 4266 case X86::BI__builtin_ia32_scatterdiv8sf: 4267 case X86::BI__builtin_ia32_scatterdiv8si: 4268 case X86::BI__builtin_ia32_scattersiv2df: 4269 case X86::BI__builtin_ia32_scattersiv2di: 4270 case X86::BI__builtin_ia32_scattersiv4df: 4271 case X86::BI__builtin_ia32_scattersiv4di: 4272 case X86::BI__builtin_ia32_scattersiv4sf: 4273 case X86::BI__builtin_ia32_scattersiv4si: 4274 case X86::BI__builtin_ia32_scattersiv8sf: 4275 case X86::BI__builtin_ia32_scattersiv8si: 4276 case X86::BI__builtin_ia32_scattersiv8df: 4277 case X86::BI__builtin_ia32_scattersiv16sf: 4278 case X86::BI__builtin_ia32_scatterdiv8df: 4279 case X86::BI__builtin_ia32_scatterdiv16sf: 4280 case X86::BI__builtin_ia32_scattersiv8di: 4281 case X86::BI__builtin_ia32_scattersiv16si: 4282 case X86::BI__builtin_ia32_scatterdiv8di: 4283 case X86::BI__builtin_ia32_scatterdiv16si: 4284 
ArgNum = 4;
4285 break;
4286 }
4287
4288 llvm::APSInt Result;
4289
4290 // We can't check the value of a dependent argument.
4291 Expr *Arg = TheCall->getArg(ArgNum);
4292 if (Arg->isTypeDependent() || Arg->isValueDependent())
4293 return false;
4294
4295 // Check constant-ness first.
4296 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4297 return true;
4298
4299 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
4300 return false;
4301
4302 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
4303 << Arg->getSourceRange();
4304 }
4305
4306 enum { TileRegLow = 0, TileRegHigh = 7 };
4307
4308 bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
4309 ArrayRef<int> ArgNums) {
4310 for (int ArgNum : ArgNums) {
4311 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
4312 return true;
4313 }
4314 return false;
4315 }
4316
4317 bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
4318 ArrayRef<int> ArgNums) {
4319 // Each tile register number is in [TileRegLow, TileRegHigh], so a bitset of
4320 // TileRegHigh + 1 bits is enough to track which registers have been used.
4321 std::bitset<TileRegHigh + 1> ArgValues;
4322 for (int ArgNum : ArgNums) {
4323 Expr *Arg = TheCall->getArg(ArgNum);
4324 if (Arg->isTypeDependent() || Arg->isValueDependent())
4325 continue;
4326
4327 llvm::APSInt Result;
4328 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4329 return true;
4330 int ArgExtValue = Result.getExtValue();
4331 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
4332 "Incorrect tile register num.");
4333 if (ArgValues.test(ArgExtValue))
4334 return Diag(TheCall->getBeginLoc(),
4335 diag::err_x86_builtin_tile_arg_duplicate)
4336 << TheCall->getArg(ArgNum)->getSourceRange();
4337 ArgValues.set(ArgExtValue);
4338 }
4339 return false;
4340 }
4341
4342 bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
4343 ArrayRef<int> ArgNums) {
4344 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
4345 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
4346 }
4347
4348 bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
4349 switch (BuiltinID) {
4350 default:
4351 return false;
4352 case X86::BI__builtin_ia32_tileloadd64:
4353 case X86::BI__builtin_ia32_tileloaddt164:
4354 case X86::BI__builtin_ia32_tilestored64:
4355 case X86::BI__builtin_ia32_tilezero:
4356 return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
4357 case X86::BI__builtin_ia32_tdpbssd:
4358 case X86::BI__builtin_ia32_tdpbsud:
4359 case X86::BI__builtin_ia32_tdpbusd:
4360 case X86::BI__builtin_ia32_tdpbuud:
4361 case X86::BI__builtin_ia32_tdpbf16ps:
4362 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
4363 }
4364 }
4365 static bool isX86_32Builtin(unsigned BuiltinID) {
4366 // These builtins only work on x86-32 targets.
4367 switch (BuiltinID) {
4368 case X86::BI__builtin_ia32_readeflags_u32:
4369 case X86::BI__builtin_ia32_writeeflags_u32:
4370 return true;
4371 }
4372
4373 return false;
4374 }
4375
4376 bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
4377 CallExpr *TheCall) {
4378 if (BuiltinID == X86::BI__builtin_cpu_supports)
4379 return SemaBuiltinCpuSupports(*this, TI, TheCall);
4380
4381 if (BuiltinID == X86::BI__builtin_cpu_is)
4382 return SemaBuiltinCpuIs(*this, TI, TheCall);
4383
4384 // Check for 32-bit only builtins on a 64-bit target.
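// For illustration only (a hedged example, not taken from this file): on an
// x86-64 target, a use such as
//   unsigned Flags = __builtin_ia32_readeflags_u32();
// is the kind of call rejected just below with err_32_bit_builtin_64_bit_tgt,
// since isX86_32Builtin() lists the 32-bit EFLAGS accessors as x86-32 only.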
4385 const llvm::Triple &TT = TI.getTriple();
4386 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
4387 return Diag(TheCall->getCallee()->getBeginLoc(),
4388 diag::err_32_bit_builtin_64_bit_tgt);
4389
4390 // If the intrinsic has rounding or SAE make sure it's valid.
4391 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
4392 return true;
4393
4394 // If the intrinsic has a gather/scatter scale immediate make sure it's valid.
4395 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
4396 return true;
4397
4398 // If the intrinsic has tile arguments, make sure they are valid.
4399 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
4400 return true;
4401
4402 // For intrinsics which take an immediate value as part of the instruction,
4403 // range check them here.
4404 int i = 0, l = 0, u = 0;
4405 switch (BuiltinID) {
4406 default:
4407 return false;
4408 case X86::BI__builtin_ia32_vec_ext_v2si:
4409 case X86::BI__builtin_ia32_vec_ext_v2di:
4410 case X86::BI__builtin_ia32_vextractf128_pd256:
4411 case X86::BI__builtin_ia32_vextractf128_ps256:
4412 case X86::BI__builtin_ia32_vextractf128_si256:
4413 case X86::BI__builtin_ia32_extract128i256:
4414 case X86::BI__builtin_ia32_extractf64x4_mask:
4415 case X86::BI__builtin_ia32_extracti64x4_mask:
4416 case X86::BI__builtin_ia32_extractf32x8_mask:
4417 case X86::BI__builtin_ia32_extracti32x8_mask:
4418 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4419 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4420 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4421 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4422 i = 1; l = 0; u = 1;
4423 break;
4424 case X86::BI__builtin_ia32_vec_set_v2di:
4425 case X86::BI__builtin_ia32_vinsertf128_pd256:
4426 case X86::BI__builtin_ia32_vinsertf128_ps256:
4427 case X86::BI__builtin_ia32_vinsertf128_si256:
4428 case X86::BI__builtin_ia32_insert128i256:
4429 case X86::BI__builtin_ia32_insertf32x8:
4430 case X86::BI__builtin_ia32_inserti32x8:
4431 case X86::BI__builtin_ia32_insertf64x4:
4432 case X86::BI__builtin_ia32_inserti64x4:
4433 case X86::BI__builtin_ia32_insertf64x2_256:
4434 case X86::BI__builtin_ia32_inserti64x2_256:
4435 case X86::BI__builtin_ia32_insertf32x4_256:
4436 case X86::BI__builtin_ia32_inserti32x4_256:
4437 i = 2; l = 0; u = 1;
4438 break;
4439 case X86::BI__builtin_ia32_vpermilpd:
4440 case X86::BI__builtin_ia32_vec_ext_v4hi:
4441 case X86::BI__builtin_ia32_vec_ext_v4si:
4442 case X86::BI__builtin_ia32_vec_ext_v4sf:
4443 case X86::BI__builtin_ia32_vec_ext_v4di:
4444 case X86::BI__builtin_ia32_extractf32x4_mask:
4445 case X86::BI__builtin_ia32_extracti32x4_mask:
4446 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4447 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4448 i = 1; l = 0; u = 3;
4449 break;
4450 case X86::BI_mm_prefetch:
4451 case X86::BI__builtin_ia32_vec_ext_v8hi:
4452 case X86::BI__builtin_ia32_vec_ext_v8si:
4453 i = 1; l = 0; u = 7;
4454 break;
4455 case X86::BI__builtin_ia32_sha1rnds4:
4456 case X86::BI__builtin_ia32_blendpd:
4457 case X86::BI__builtin_ia32_shufpd:
4458 case X86::BI__builtin_ia32_vec_set_v4hi:
4459 case X86::BI__builtin_ia32_vec_set_v4si:
4460 case X86::BI__builtin_ia32_vec_set_v4di:
4461 case X86::BI__builtin_ia32_shuf_f32x4_256:
4462 case X86::BI__builtin_ia32_shuf_f64x2_256:
4463 case X86::BI__builtin_ia32_shuf_i32x4_256:
4464 case X86::BI__builtin_ia32_shuf_i64x2_256:
4465 case X86::BI__builtin_ia32_insertf64x2_512:
4466 case X86::BI__builtin_ia32_inserti64x2_512:
4467 case X86::BI__builtin_ia32_insertf32x4:
4468
case X86::BI__builtin_ia32_inserti32x4: 4469 i = 2; l = 0; u = 3; 4470 break; 4471 case X86::BI__builtin_ia32_vpermil2pd: 4472 case X86::BI__builtin_ia32_vpermil2pd256: 4473 case X86::BI__builtin_ia32_vpermil2ps: 4474 case X86::BI__builtin_ia32_vpermil2ps256: 4475 i = 3; l = 0; u = 3; 4476 break; 4477 case X86::BI__builtin_ia32_cmpb128_mask: 4478 case X86::BI__builtin_ia32_cmpw128_mask: 4479 case X86::BI__builtin_ia32_cmpd128_mask: 4480 case X86::BI__builtin_ia32_cmpq128_mask: 4481 case X86::BI__builtin_ia32_cmpb256_mask: 4482 case X86::BI__builtin_ia32_cmpw256_mask: 4483 case X86::BI__builtin_ia32_cmpd256_mask: 4484 case X86::BI__builtin_ia32_cmpq256_mask: 4485 case X86::BI__builtin_ia32_cmpb512_mask: 4486 case X86::BI__builtin_ia32_cmpw512_mask: 4487 case X86::BI__builtin_ia32_cmpd512_mask: 4488 case X86::BI__builtin_ia32_cmpq512_mask: 4489 case X86::BI__builtin_ia32_ucmpb128_mask: 4490 case X86::BI__builtin_ia32_ucmpw128_mask: 4491 case X86::BI__builtin_ia32_ucmpd128_mask: 4492 case X86::BI__builtin_ia32_ucmpq128_mask: 4493 case X86::BI__builtin_ia32_ucmpb256_mask: 4494 case X86::BI__builtin_ia32_ucmpw256_mask: 4495 case X86::BI__builtin_ia32_ucmpd256_mask: 4496 case X86::BI__builtin_ia32_ucmpq256_mask: 4497 case X86::BI__builtin_ia32_ucmpb512_mask: 4498 case X86::BI__builtin_ia32_ucmpw512_mask: 4499 case X86::BI__builtin_ia32_ucmpd512_mask: 4500 case X86::BI__builtin_ia32_ucmpq512_mask: 4501 case X86::BI__builtin_ia32_vpcomub: 4502 case X86::BI__builtin_ia32_vpcomuw: 4503 case X86::BI__builtin_ia32_vpcomud: 4504 case X86::BI__builtin_ia32_vpcomuq: 4505 case X86::BI__builtin_ia32_vpcomb: 4506 case X86::BI__builtin_ia32_vpcomw: 4507 case X86::BI__builtin_ia32_vpcomd: 4508 case X86::BI__builtin_ia32_vpcomq: 4509 case X86::BI__builtin_ia32_vec_set_v8hi: 4510 case X86::BI__builtin_ia32_vec_set_v8si: 4511 i = 2; l = 0; u = 7; 4512 break; 4513 case X86::BI__builtin_ia32_vpermilpd256: 4514 case X86::BI__builtin_ia32_roundps: 4515 case X86::BI__builtin_ia32_roundpd: 4516 case X86::BI__builtin_ia32_roundps256: 4517 case X86::BI__builtin_ia32_roundpd256: 4518 case X86::BI__builtin_ia32_getmantpd128_mask: 4519 case X86::BI__builtin_ia32_getmantpd256_mask: 4520 case X86::BI__builtin_ia32_getmantps128_mask: 4521 case X86::BI__builtin_ia32_getmantps256_mask: 4522 case X86::BI__builtin_ia32_getmantpd512_mask: 4523 case X86::BI__builtin_ia32_getmantps512_mask: 4524 case X86::BI__builtin_ia32_getmantph128_mask: 4525 case X86::BI__builtin_ia32_getmantph256_mask: 4526 case X86::BI__builtin_ia32_getmantph512_mask: 4527 case X86::BI__builtin_ia32_vec_ext_v16qi: 4528 case X86::BI__builtin_ia32_vec_ext_v16hi: 4529 i = 1; l = 0; u = 15; 4530 break; 4531 case X86::BI__builtin_ia32_pblendd128: 4532 case X86::BI__builtin_ia32_blendps: 4533 case X86::BI__builtin_ia32_blendpd256: 4534 case X86::BI__builtin_ia32_shufpd256: 4535 case X86::BI__builtin_ia32_roundss: 4536 case X86::BI__builtin_ia32_roundsd: 4537 case X86::BI__builtin_ia32_rangepd128_mask: 4538 case X86::BI__builtin_ia32_rangepd256_mask: 4539 case X86::BI__builtin_ia32_rangepd512_mask: 4540 case X86::BI__builtin_ia32_rangeps128_mask: 4541 case X86::BI__builtin_ia32_rangeps256_mask: 4542 case X86::BI__builtin_ia32_rangeps512_mask: 4543 case X86::BI__builtin_ia32_getmantsd_round_mask: 4544 case X86::BI__builtin_ia32_getmantss_round_mask: 4545 case X86::BI__builtin_ia32_getmantsh_round_mask: 4546 case X86::BI__builtin_ia32_vec_set_v16qi: 4547 case X86::BI__builtin_ia32_vec_set_v16hi: 4548 i = 2; l = 0; u = 15; 4549 break; 4550 case 
X86::BI__builtin_ia32_vec_ext_v32qi: 4551 i = 1; l = 0; u = 31; 4552 break; 4553 case X86::BI__builtin_ia32_cmpps: 4554 case X86::BI__builtin_ia32_cmpss: 4555 case X86::BI__builtin_ia32_cmppd: 4556 case X86::BI__builtin_ia32_cmpsd: 4557 case X86::BI__builtin_ia32_cmpps256: 4558 case X86::BI__builtin_ia32_cmppd256: 4559 case X86::BI__builtin_ia32_cmpps128_mask: 4560 case X86::BI__builtin_ia32_cmppd128_mask: 4561 case X86::BI__builtin_ia32_cmpps256_mask: 4562 case X86::BI__builtin_ia32_cmppd256_mask: 4563 case X86::BI__builtin_ia32_cmpps512_mask: 4564 case X86::BI__builtin_ia32_cmppd512_mask: 4565 case X86::BI__builtin_ia32_cmpsd_mask: 4566 case X86::BI__builtin_ia32_cmpss_mask: 4567 case X86::BI__builtin_ia32_vec_set_v32qi: 4568 i = 2; l = 0; u = 31; 4569 break; 4570 case X86::BI__builtin_ia32_permdf256: 4571 case X86::BI__builtin_ia32_permdi256: 4572 case X86::BI__builtin_ia32_permdf512: 4573 case X86::BI__builtin_ia32_permdi512: 4574 case X86::BI__builtin_ia32_vpermilps: 4575 case X86::BI__builtin_ia32_vpermilps256: 4576 case X86::BI__builtin_ia32_vpermilpd512: 4577 case X86::BI__builtin_ia32_vpermilps512: 4578 case X86::BI__builtin_ia32_pshufd: 4579 case X86::BI__builtin_ia32_pshufd256: 4580 case X86::BI__builtin_ia32_pshufd512: 4581 case X86::BI__builtin_ia32_pshufhw: 4582 case X86::BI__builtin_ia32_pshufhw256: 4583 case X86::BI__builtin_ia32_pshufhw512: 4584 case X86::BI__builtin_ia32_pshuflw: 4585 case X86::BI__builtin_ia32_pshuflw256: 4586 case X86::BI__builtin_ia32_pshuflw512: 4587 case X86::BI__builtin_ia32_vcvtps2ph: 4588 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4589 case X86::BI__builtin_ia32_vcvtps2ph256: 4590 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4591 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4592 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4593 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4594 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4595 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4596 case X86::BI__builtin_ia32_rndscaleps_mask: 4597 case X86::BI__builtin_ia32_rndscalepd_mask: 4598 case X86::BI__builtin_ia32_rndscaleph_mask: 4599 case X86::BI__builtin_ia32_reducepd128_mask: 4600 case X86::BI__builtin_ia32_reducepd256_mask: 4601 case X86::BI__builtin_ia32_reducepd512_mask: 4602 case X86::BI__builtin_ia32_reduceps128_mask: 4603 case X86::BI__builtin_ia32_reduceps256_mask: 4604 case X86::BI__builtin_ia32_reduceps512_mask: 4605 case X86::BI__builtin_ia32_reduceph128_mask: 4606 case X86::BI__builtin_ia32_reduceph256_mask: 4607 case X86::BI__builtin_ia32_reduceph512_mask: 4608 case X86::BI__builtin_ia32_prold512: 4609 case X86::BI__builtin_ia32_prolq512: 4610 case X86::BI__builtin_ia32_prold128: 4611 case X86::BI__builtin_ia32_prold256: 4612 case X86::BI__builtin_ia32_prolq128: 4613 case X86::BI__builtin_ia32_prolq256: 4614 case X86::BI__builtin_ia32_prord512: 4615 case X86::BI__builtin_ia32_prorq512: 4616 case X86::BI__builtin_ia32_prord128: 4617 case X86::BI__builtin_ia32_prord256: 4618 case X86::BI__builtin_ia32_prorq128: 4619 case X86::BI__builtin_ia32_prorq256: 4620 case X86::BI__builtin_ia32_fpclasspd128_mask: 4621 case X86::BI__builtin_ia32_fpclasspd256_mask: 4622 case X86::BI__builtin_ia32_fpclassps128_mask: 4623 case X86::BI__builtin_ia32_fpclassps256_mask: 4624 case X86::BI__builtin_ia32_fpclassps512_mask: 4625 case X86::BI__builtin_ia32_fpclasspd512_mask: 4626 case X86::BI__builtin_ia32_fpclassph128_mask: 4627 case X86::BI__builtin_ia32_fpclassph256_mask: 4628 case X86::BI__builtin_ia32_fpclassph512_mask: 4629 case 
X86::BI__builtin_ia32_fpclasssd_mask: 4630 case X86::BI__builtin_ia32_fpclassss_mask: 4631 case X86::BI__builtin_ia32_fpclasssh_mask: 4632 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4633 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4634 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4635 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4636 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4637 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4638 case X86::BI__builtin_ia32_kshiftliqi: 4639 case X86::BI__builtin_ia32_kshiftlihi: 4640 case X86::BI__builtin_ia32_kshiftlisi: 4641 case X86::BI__builtin_ia32_kshiftlidi: 4642 case X86::BI__builtin_ia32_kshiftriqi: 4643 case X86::BI__builtin_ia32_kshiftrihi: 4644 case X86::BI__builtin_ia32_kshiftrisi: 4645 case X86::BI__builtin_ia32_kshiftridi: 4646 i = 1; l = 0; u = 255; 4647 break; 4648 case X86::BI__builtin_ia32_vperm2f128_pd256: 4649 case X86::BI__builtin_ia32_vperm2f128_ps256: 4650 case X86::BI__builtin_ia32_vperm2f128_si256: 4651 case X86::BI__builtin_ia32_permti256: 4652 case X86::BI__builtin_ia32_pblendw128: 4653 case X86::BI__builtin_ia32_pblendw256: 4654 case X86::BI__builtin_ia32_blendps256: 4655 case X86::BI__builtin_ia32_pblendd256: 4656 case X86::BI__builtin_ia32_palignr128: 4657 case X86::BI__builtin_ia32_palignr256: 4658 case X86::BI__builtin_ia32_palignr512: 4659 case X86::BI__builtin_ia32_alignq512: 4660 case X86::BI__builtin_ia32_alignd512: 4661 case X86::BI__builtin_ia32_alignd128: 4662 case X86::BI__builtin_ia32_alignd256: 4663 case X86::BI__builtin_ia32_alignq128: 4664 case X86::BI__builtin_ia32_alignq256: 4665 case X86::BI__builtin_ia32_vcomisd: 4666 case X86::BI__builtin_ia32_vcomiss: 4667 case X86::BI__builtin_ia32_shuf_f32x4: 4668 case X86::BI__builtin_ia32_shuf_f64x2: 4669 case X86::BI__builtin_ia32_shuf_i32x4: 4670 case X86::BI__builtin_ia32_shuf_i64x2: 4671 case X86::BI__builtin_ia32_shufpd512: 4672 case X86::BI__builtin_ia32_shufps: 4673 case X86::BI__builtin_ia32_shufps256: 4674 case X86::BI__builtin_ia32_shufps512: 4675 case X86::BI__builtin_ia32_dbpsadbw128: 4676 case X86::BI__builtin_ia32_dbpsadbw256: 4677 case X86::BI__builtin_ia32_dbpsadbw512: 4678 case X86::BI__builtin_ia32_vpshldd128: 4679 case X86::BI__builtin_ia32_vpshldd256: 4680 case X86::BI__builtin_ia32_vpshldd512: 4681 case X86::BI__builtin_ia32_vpshldq128: 4682 case X86::BI__builtin_ia32_vpshldq256: 4683 case X86::BI__builtin_ia32_vpshldq512: 4684 case X86::BI__builtin_ia32_vpshldw128: 4685 case X86::BI__builtin_ia32_vpshldw256: 4686 case X86::BI__builtin_ia32_vpshldw512: 4687 case X86::BI__builtin_ia32_vpshrdd128: 4688 case X86::BI__builtin_ia32_vpshrdd256: 4689 case X86::BI__builtin_ia32_vpshrdd512: 4690 case X86::BI__builtin_ia32_vpshrdq128: 4691 case X86::BI__builtin_ia32_vpshrdq256: 4692 case X86::BI__builtin_ia32_vpshrdq512: 4693 case X86::BI__builtin_ia32_vpshrdw128: 4694 case X86::BI__builtin_ia32_vpshrdw256: 4695 case X86::BI__builtin_ia32_vpshrdw512: 4696 i = 2; l = 0; u = 255; 4697 break; 4698 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4699 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4700 case X86::BI__builtin_ia32_fixupimmps512_mask: 4701 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4702 case X86::BI__builtin_ia32_fixupimmsd_mask: 4703 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4704 case X86::BI__builtin_ia32_fixupimmss_mask: 4705 case X86::BI__builtin_ia32_fixupimmss_maskz: 4706 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4707 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4708 case 
X86::BI__builtin_ia32_fixupimmpd256_mask:
4709 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
4710 case X86::BI__builtin_ia32_fixupimmps128_mask:
4711 case X86::BI__builtin_ia32_fixupimmps128_maskz:
4712 case X86::BI__builtin_ia32_fixupimmps256_mask:
4713 case X86::BI__builtin_ia32_fixupimmps256_maskz:
4714 case X86::BI__builtin_ia32_pternlogd512_mask:
4715 case X86::BI__builtin_ia32_pternlogd512_maskz:
4716 case X86::BI__builtin_ia32_pternlogq512_mask:
4717 case X86::BI__builtin_ia32_pternlogq512_maskz:
4718 case X86::BI__builtin_ia32_pternlogd128_mask:
4719 case X86::BI__builtin_ia32_pternlogd128_maskz:
4720 case X86::BI__builtin_ia32_pternlogd256_mask:
4721 case X86::BI__builtin_ia32_pternlogd256_maskz:
4722 case X86::BI__builtin_ia32_pternlogq128_mask:
4723 case X86::BI__builtin_ia32_pternlogq128_maskz:
4724 case X86::BI__builtin_ia32_pternlogq256_mask:
4725 case X86::BI__builtin_ia32_pternlogq256_maskz:
4726 i = 3; l = 0; u = 255;
4727 break;
4728 case X86::BI__builtin_ia32_gatherpfdpd:
4729 case X86::BI__builtin_ia32_gatherpfdps:
4730 case X86::BI__builtin_ia32_gatherpfqpd:
4731 case X86::BI__builtin_ia32_gatherpfqps:
4732 case X86::BI__builtin_ia32_scatterpfdpd:
4733 case X86::BI__builtin_ia32_scatterpfdps:
4734 case X86::BI__builtin_ia32_scatterpfqpd:
4735 case X86::BI__builtin_ia32_scatterpfqps:
4736 i = 4; l = 2; u = 3;
4737 break;
4738 case X86::BI__builtin_ia32_reducesd_mask:
4739 case X86::BI__builtin_ia32_reducess_mask:
4740 case X86::BI__builtin_ia32_rndscalesd_round_mask:
4741 case X86::BI__builtin_ia32_rndscaless_round_mask:
4742 case X86::BI__builtin_ia32_rndscalesh_round_mask:
4743 case X86::BI__builtin_ia32_reducesh_mask:
4744 i = 4; l = 0; u = 255;
4745 break;
4746 }
4747
4748 // Note that we don't force a hard error on the range check here, allowing
4749 // template-generated or macro-generated dead code to potentially have out-of-
4750 // range values. These still need to code-generate, but they don't necessarily
4751 // need to make any sense. We use a warning that defaults to an error.
4752 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
4753 }
4754
4755 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
4756 /// parameter with the FormatAttr's correct format_idx and firstDataArg.
4757 /// Returns true when the format fits the function and the FormatStringInfo has
4758 /// been populated.
4759 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
4760 FormatStringInfo *FSI) {
4761 FSI->HasVAListArg = Format->getFirstArg() == 0;
4762 FSI->FormatIdx = Format->getFormatIdx() - 1;
4763 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
4764
4765 // The way the format attribute works in GCC, the implicit this argument
4766 // of member functions is counted. However, it doesn't appear in our own
4767 // lists, so decrement format_idx in that case.
4768 if (IsCXXMember) {
4769 if (FSI->FormatIdx == 0)
4770 return false;
4771 --FSI->FormatIdx;
4772 if (FSI->FirstDataArg != 0)
4773 --FSI->FirstDataArg;
4774 }
4775 return true;
4776 }
4777
4778 /// Checks if the given expression evaluates to null.
4779 ///
4780 /// Returns true if the value evaluates to null.
4781 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
4782 // If the expression has non-null type, it doesn't evaluate to null.
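// For illustration only (a hedged example; the declarations are assumptions
// of the sketch, not of this file): given
//   void take(int * _Nonnull p);
//   take(q);   // where q has type 'int * _Nonnull'
// the argument's type already guarantees non-nullness, so the nullability
// check below can conclude "not null" without evaluating the expression.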
4783 if (auto nullability
4784 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
4785 if (*nullability == NullabilityKind::NonNull)
4786 return false;
4787 }
4788
4789 // As a special case, transparent unions initialized with zero are
4790 // considered null for the purposes of the nonnull attribute.
4791 if (const RecordType *UT = Expr->getType()->getAsUnionType()) {
4792 if (UT->getDecl()->hasAttr<TransparentUnionAttr>())
4793 if (const CompoundLiteralExpr *CLE =
4794 dyn_cast<CompoundLiteralExpr>(Expr))
4795 if (const InitListExpr *ILE =
4796 dyn_cast<InitListExpr>(CLE->getInitializer()))
4797 Expr = ILE->getInit(0);
4798 }
4799
4800 bool Result;
4801 return (!Expr->isValueDependent() &&
4802 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
4803 !Result);
4804 }
4805
4806 static void CheckNonNullArgument(Sema &S,
4807 const Expr *ArgExpr,
4808 SourceLocation CallSiteLoc) {
4809 if (CheckNonNullExpr(S, ArgExpr))
4810 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
4811 S.PDiag(diag::warn_null_arg)
4812 << ArgExpr->getSourceRange());
4813 }
4814
4815 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
4816 FormatStringInfo FSI;
4817 if ((GetFormatStringType(Format) == FST_NSString) &&
4818 getFormatStringInfo(Format, false, &FSI)) {
4819 Idx = FSI.FormatIdx;
4820 return true;
4821 }
4822 return false;
4823 }
4824
4825 /// Diagnose the use of a %s directive in an NSString that is being passed
4826 /// as a format string to a formatting method.
4827 static void
4828 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
4829 const NamedDecl *FDecl,
4830 Expr **Args,
4831 unsigned NumArgs) {
4832 unsigned Idx = 0;
4833 bool Format = false;
4834 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
4835 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
4836 Idx = 2;
4837 Format = true;
4838 }
4839 else
4840 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
4841 if (S.GetFormatNSStringIdx(I, Idx)) {
4842 Format = true;
4843 break;
4844 }
4845 }
4846 if (!Format || NumArgs <= Idx)
4847 return;
4848 const Expr *FormatExpr = Args[Idx];
4849 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
4850 FormatExpr = CSCE->getSubExpr();
4851 const StringLiteral *FormatString;
4852 if (const ObjCStringLiteral *OSL =
4853 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
4854 FormatString = OSL->getString();
4855 else
4856 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
4857 if (!FormatString)
4858 return;
4859 if (S.FormatStringHasSArg(FormatString)) {
4860 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
4861 << "%s" << 1 << 1;
4862 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
4863 << FDecl->getDeclName();
4864 }
4865 }
4866
4867 /// Determine whether the given type has a non-null nullability annotation.
4868 static bool isNonNullType(ASTContext &ctx, QualType type) {
4869 if (auto nullability = type->getNullability(ctx))
4870 return *nullability == NullabilityKind::NonNull;
4871
4872 return false;
4873 }
4874
4875 static void CheckNonNullArguments(Sema &S,
4876 const NamedDecl *FDecl,
4877 const FunctionProtoType *Proto,
4878 ArrayRef<const Expr *> Args,
4879 SourceLocation CallSiteLoc) {
4880 assert((FDecl || Proto) && "Need a function declaration or prototype");
4881
4882 // Already checked by the constant evaluator.
4883 if (S.isConstantEvaluated())
4884 return;
4885 // Check the attributes attached to the method/function itself.
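// For illustration only (a hedged example; the prototypes are assumptions of
// the sketch, not of this file):
//   void f(int *a, int *b) __attribute__((nonnull));     // all pointer args
//   void g(int *a, int *b) __attribute__((nonnull(2)));  // only 'b'
// For 'f' every pointer argument is checked right away; for 'g' the 1-based
// attribute index is translated to an AST index and recorded in NonNullArgs
// so the matching argument can be checked at the end of this function.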
4886 llvm::SmallBitVector NonNullArgs; 4887 if (FDecl) { 4888 // Handle the nonnull attribute on the function/method declaration itself. 4889 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4890 if (!NonNull->args_size()) { 4891 // Easy case: all pointer arguments are nonnull. 4892 for (const auto *Arg : Args) 4893 if (S.isValidPointerAttrType(Arg->getType())) 4894 CheckNonNullArgument(S, Arg, CallSiteLoc); 4895 return; 4896 } 4897 4898 for (const ParamIdx &Idx : NonNull->args()) { 4899 unsigned IdxAST = Idx.getASTIndex(); 4900 if (IdxAST >= Args.size()) 4901 continue; 4902 if (NonNullArgs.empty()) 4903 NonNullArgs.resize(Args.size()); 4904 NonNullArgs.set(IdxAST); 4905 } 4906 } 4907 } 4908 4909 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4910 // Handle the nonnull attribute on the parameters of the 4911 // function/method. 4912 ArrayRef<ParmVarDecl*> parms; 4913 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4914 parms = FD->parameters(); 4915 else 4916 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4917 4918 unsigned ParamIndex = 0; 4919 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4920 I != E; ++I, ++ParamIndex) { 4921 const ParmVarDecl *PVD = *I; 4922 if (PVD->hasAttr<NonNullAttr>() || 4923 isNonNullType(S.Context, PVD->getType())) { 4924 if (NonNullArgs.empty()) 4925 NonNullArgs.resize(Args.size()); 4926 4927 NonNullArgs.set(ParamIndex); 4928 } 4929 } 4930 } else { 4931 // If we have a non-function, non-method declaration but no 4932 // function prototype, try to dig out the function prototype. 4933 if (!Proto) { 4934 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4935 QualType type = VD->getType().getNonReferenceType(); 4936 if (auto pointerType = type->getAs<PointerType>()) 4937 type = pointerType->getPointeeType(); 4938 else if (auto blockType = type->getAs<BlockPointerType>()) 4939 type = blockType->getPointeeType(); 4940 // FIXME: data member pointers? 4941 4942 // Dig out the function prototype, if there is one. 4943 Proto = type->getAs<FunctionProtoType>(); 4944 } 4945 } 4946 4947 // Fill in non-null argument information from the nullability 4948 // information on the parameter types (if we have them). 4949 if (Proto) { 4950 unsigned Index = 0; 4951 for (auto paramType : Proto->getParamTypes()) { 4952 if (isNonNullType(S.Context, paramType)) { 4953 if (NonNullArgs.empty()) 4954 NonNullArgs.resize(Args.size()); 4955 4956 NonNullArgs.set(Index); 4957 } 4958 4959 ++Index; 4960 } 4961 } 4962 } 4963 4964 // Check for non-null arguments. 4965 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4966 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4967 if (NonNullArgs[ArgIndex]) 4968 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4969 } 4970 } 4971 4972 /// Warn if a pointer or reference argument passed to a function points to an 4973 /// object that is less aligned than the parameter. This can happen when 4974 /// creating a typedef with a lower alignment than the original type and then 4975 /// calling functions defined in terms of the original type. 4976 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 4977 StringRef ParamName, QualType ArgTy, 4978 QualType ParamTy) { 4979 4980 // If a function accepts a pointer or reference type 4981 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 4982 return; 4983 4984 // If the parameter is a pointer type, get the pointee type for the 4985 // argument too. 
If the parameter is a reference type, don't try to get 4986 // the pointee type for the argument. 4987 if (ParamTy->isPointerType()) 4988 ArgTy = ArgTy->getPointeeType(); 4989 4990 // Remove reference or pointer 4991 ParamTy = ParamTy->getPointeeType(); 4992 4993 // Find expected alignment, and the actual alignment of the passed object. 4994 // getTypeAlignInChars requires complete types 4995 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 4996 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 4997 ArgTy->isUndeducedType()) 4998 return; 4999 5000 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 5001 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 5002 5003 // If the argument is less aligned than the parameter, there is a 5004 // potential alignment issue. 5005 if (ArgAlign < ParamAlign) 5006 Diag(Loc, diag::warn_param_mismatched_alignment) 5007 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 5008 << ParamName << FDecl; 5009 } 5010 5011 /// Handles the checks for format strings, non-POD arguments to vararg 5012 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 5013 /// attributes. 5014 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 5015 const Expr *ThisArg, ArrayRef<const Expr *> Args, 5016 bool IsMemberFunction, SourceLocation Loc, 5017 SourceRange Range, VariadicCallType CallType) { 5018 // FIXME: We should check as much as we can in the template definition. 5019 if (CurContext->isDependentContext()) 5020 return; 5021 5022 // Printf and scanf checking. 5023 llvm::SmallBitVector CheckedVarArgs; 5024 if (FDecl) { 5025 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 5026 // Only create vector if there are format attributes. 5027 CheckedVarArgs.resize(Args.size()); 5028 5029 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 5030 CheckedVarArgs); 5031 } 5032 } 5033 5034 // Refuse POD arguments that weren't caught by the format string 5035 // checks above. 5036 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 5037 if (CallType != VariadicDoesNotApply && 5038 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 5039 unsigned NumParams = Proto ? Proto->getNumParams() 5040 : FDecl && isa<FunctionDecl>(FDecl) 5041 ? cast<FunctionDecl>(FDecl)->getNumParams() 5042 : FDecl && isa<ObjCMethodDecl>(FDecl) 5043 ? cast<ObjCMethodDecl>(FDecl)->param_size() 5044 : 0; 5045 5046 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 5047 // Args[ArgIdx] can be null in malformed code. 5048 if (const Expr *Arg = Args[ArgIdx]) { 5049 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 5050 checkVariadicArgument(Arg, CallType); 5051 } 5052 } 5053 } 5054 5055 if (FDecl || Proto) { 5056 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 5057 5058 // Type safety checking. 5059 if (FDecl) { 5060 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 5061 CheckArgumentWithTypeTag(I, Args, Loc); 5062 } 5063 } 5064 5065 // Check that passed arguments match the alignment of original arguments. 5066 // Try to get the missing prototype from the declaration. 5067 if (!Proto && FDecl) { 5068 const auto *FT = FDecl->getFunctionType(); 5069 if (isa_and_nonnull<FunctionProtoType>(FT)) 5070 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 5071 } 5072 if (Proto) { 5073 // For variadic functions, we may have more args than parameters. 5074 // For some K&R functions, we may have less args than parameters. 
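// For illustration only (a hedged example, not from the original comments):
// a call like 'printf("%d %d", 1, 2, 3)' supplies more arguments than the
// prototype declares parameters, so the loop below is clamped to the smaller
// of the two counts and only pairs where both a parameter type and an
// argument expression exist are compared.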
5075 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 5076 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 5077 // Args[ArgIdx] can be null in malformed code. 5078 if (const Expr *Arg = Args[ArgIdx]) { 5079 if (Arg->containsErrors()) 5080 continue; 5081 5082 QualType ParamTy = Proto->getParamType(ArgIdx); 5083 QualType ArgTy = Arg->getType(); 5084 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 5085 ArgTy, ParamTy); 5086 } 5087 } 5088 } 5089 5090 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 5091 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 5092 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 5093 if (!Arg->isValueDependent()) { 5094 Expr::EvalResult Align; 5095 if (Arg->EvaluateAsInt(Align, Context)) { 5096 const llvm::APSInt &I = Align.Val.getInt(); 5097 if (!I.isPowerOf2()) 5098 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 5099 << Arg->getSourceRange(); 5100 5101 if (I > Sema::MaximumAlignment) 5102 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 5103 << Arg->getSourceRange() << Sema::MaximumAlignment; 5104 } 5105 } 5106 } 5107 5108 if (FD) 5109 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 5110 } 5111 5112 /// CheckConstructorCall - Check a constructor call for correctness and safety 5113 /// properties not enforced by the C type system. 5114 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 5115 ArrayRef<const Expr *> Args, 5116 const FunctionProtoType *Proto, 5117 SourceLocation Loc) { 5118 VariadicCallType CallType = 5119 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 5120 5121 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 5122 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 5123 Context.getPointerType(Ctor->getThisObjectType())); 5124 5125 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 5126 Loc, SourceRange(), CallType); 5127 } 5128 5129 /// CheckFunctionCall - Check a direct function call for various correctness 5130 /// and safety properties not strictly enforced by the C type system. 5131 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 5132 const FunctionProtoType *Proto) { 5133 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5134 isa<CXXMethodDecl>(FDecl); 5135 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5136 IsMemberOperatorCall; 5137 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5138 TheCall->getCallee()); 5139 Expr** Args = TheCall->getArgs(); 5140 unsigned NumArgs = TheCall->getNumArgs(); 5141 5142 Expr *ImplicitThis = nullptr; 5143 if (IsMemberOperatorCall) { 5144 // If this is a call to a member operator, hide the first argument 5145 // from checkCall. 5146 // FIXME: Our choice of AST representation here is less than ideal. 5147 ImplicitThis = Args[0]; 5148 ++Args; 5149 --NumArgs; 5150 } else if (IsMemberFunction) 5151 ImplicitThis = 5152 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5153 5154 if (ImplicitThis) { 5155 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5156 // used. 
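// For illustration only (a hedged note): for 'obj.method()' the implicit
// object argument is the object itself (not a pointer), while for
// 'ptr->method()' it is already a pointer; the code below normalizes both
// cases to a pointer type before comparing alignments.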
5157 QualType ThisType = ImplicitThis->getType(); 5158 if (!ThisType->isPointerType()) { 5159 assert(!ThisType->isReferenceType()); 5160 ThisType = Context.getPointerType(ThisType); 5161 } 5162 5163 QualType ThisTypeFromDecl = 5164 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5165 5166 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5167 ThisTypeFromDecl); 5168 } 5169 5170 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5171 IsMemberFunction, TheCall->getRParenLoc(), 5172 TheCall->getCallee()->getSourceRange(), CallType); 5173 5174 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5175 // None of the checks below are needed for functions that don't have 5176 // simple names (e.g., C++ conversion functions). 5177 if (!FnInfo) 5178 return false; 5179 5180 CheckTCBEnforcement(TheCall, FDecl); 5181 5182 CheckAbsoluteValueFunction(TheCall, FDecl); 5183 CheckMaxUnsignedZero(TheCall, FDecl); 5184 5185 if (getLangOpts().ObjC) 5186 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5187 5188 unsigned CMId = FDecl->getMemoryFunctionKind(); 5189 5190 // Handle memory setting and copying functions. 5191 switch (CMId) { 5192 case 0: 5193 return false; 5194 case Builtin::BIstrlcpy: // fallthrough 5195 case Builtin::BIstrlcat: 5196 CheckStrlcpycatArguments(TheCall, FnInfo); 5197 break; 5198 case Builtin::BIstrncat: 5199 CheckStrncatArguments(TheCall, FnInfo); 5200 break; 5201 case Builtin::BIfree: 5202 CheckFreeArguments(TheCall); 5203 break; 5204 default: 5205 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5206 } 5207 5208 return false; 5209 } 5210 5211 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5212 ArrayRef<const Expr *> Args) { 5213 VariadicCallType CallType = 5214 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5215 5216 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5217 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5218 CallType); 5219 5220 return false; 5221 } 5222 5223 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5224 const FunctionProtoType *Proto) { 5225 QualType Ty; 5226 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5227 Ty = V->getType().getNonReferenceType(); 5228 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5229 Ty = F->getType().getNonReferenceType(); 5230 else 5231 return false; 5232 5233 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5234 !Ty->isFunctionProtoType()) 5235 return false; 5236 5237 VariadicCallType CallType; 5238 if (!Proto || !Proto->isVariadic()) { 5239 CallType = VariadicDoesNotApply; 5240 } else if (Ty->isBlockPointerType()) { 5241 CallType = VariadicBlock; 5242 } else { // Ty->isFunctionPointerType() 5243 CallType = VariadicFunction; 5244 } 5245 5246 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5247 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5248 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5249 TheCall->getCallee()->getSourceRange(), CallType); 5250 5251 return false; 5252 } 5253 5254 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5255 /// such as function pointers returned from functions. 
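/// For illustration only (a hedged example, not part of the original comment):
/// given 'int (*lookup(void))(int);', the call 'lookup()(42)' has a callee
/// that is itself the result of a call, so there is no declaration to hang
/// attributes on and this generic entry point performs the checks instead.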
5256 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5257 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5258 TheCall->getCallee()); 5259 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5260 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5261 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5262 TheCall->getCallee()->getSourceRange(), CallType); 5263 5264 return false; 5265 } 5266 5267 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5268 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5269 return false; 5270 5271 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5272 switch (Op) { 5273 case AtomicExpr::AO__c11_atomic_init: 5274 case AtomicExpr::AO__opencl_atomic_init: 5275 llvm_unreachable("There is no ordering argument for an init"); 5276 5277 case AtomicExpr::AO__c11_atomic_load: 5278 case AtomicExpr::AO__opencl_atomic_load: 5279 case AtomicExpr::AO__atomic_load_n: 5280 case AtomicExpr::AO__atomic_load: 5281 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5282 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5283 5284 case AtomicExpr::AO__c11_atomic_store: 5285 case AtomicExpr::AO__opencl_atomic_store: 5286 case AtomicExpr::AO__atomic_store: 5287 case AtomicExpr::AO__atomic_store_n: 5288 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5289 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5290 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5291 5292 default: 5293 return true; 5294 } 5295 } 5296 5297 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5298 AtomicExpr::AtomicOp Op) { 5299 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5300 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5301 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5302 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5303 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5304 Op); 5305 } 5306 5307 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5308 SourceLocation RParenLoc, MultiExprArg Args, 5309 AtomicExpr::AtomicOp Op, 5310 AtomicArgumentOrder ArgOrder) { 5311 // All the non-OpenCL operations take one of the following forms. 5312 // The OpenCL operations take the __c11 forms with one extra argument for 5313 // synchronization scope. 
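// For illustration only (hedged examples; the exact argument spellings are
// assumptions of the sketch, not of this file):
//   __c11_atomic_load(p, __ATOMIC_SEQ_CST);           // C11 form
//   __opencl_atomic_load(p, memory_order_seq_cst,
//                        memory_scope_device);        // OpenCL: extra scope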
5314 enum { 5315 // C __c11_atomic_init(A *, C) 5316 Init, 5317 5318 // C __c11_atomic_load(A *, int) 5319 Load, 5320 5321 // void __atomic_load(A *, CP, int) 5322 LoadCopy, 5323 5324 // void __atomic_store(A *, CP, int) 5325 Copy, 5326 5327 // C __c11_atomic_add(A *, M, int) 5328 Arithmetic, 5329 5330 // C __atomic_exchange_n(A *, CP, int) 5331 Xchg, 5332 5333 // void __atomic_exchange(A *, C *, CP, int) 5334 GNUXchg, 5335 5336 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5337 C11CmpXchg, 5338 5339 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5340 GNUCmpXchg 5341 } Form = Init; 5342 5343 const unsigned NumForm = GNUCmpXchg + 1; 5344 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5345 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5346 // where: 5347 // C is an appropriate type, 5348 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5349 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5350 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5351 // the int parameters are for orderings. 5352 5353 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5354 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5355 "need to update code for modified forms"); 5356 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5357 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5358 AtomicExpr::AO__atomic_load, 5359 "need to update code for modified C11 atomics"); 5360 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5361 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5362 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5363 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5364 IsOpenCL; 5365 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5366 Op == AtomicExpr::AO__atomic_store_n || 5367 Op == AtomicExpr::AO__atomic_exchange_n || 5368 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5369 bool IsAddSub = false; 5370 5371 switch (Op) { 5372 case AtomicExpr::AO__c11_atomic_init: 5373 case AtomicExpr::AO__opencl_atomic_init: 5374 Form = Init; 5375 break; 5376 5377 case AtomicExpr::AO__c11_atomic_load: 5378 case AtomicExpr::AO__opencl_atomic_load: 5379 case AtomicExpr::AO__atomic_load_n: 5380 Form = Load; 5381 break; 5382 5383 case AtomicExpr::AO__atomic_load: 5384 Form = LoadCopy; 5385 break; 5386 5387 case AtomicExpr::AO__c11_atomic_store: 5388 case AtomicExpr::AO__opencl_atomic_store: 5389 case AtomicExpr::AO__atomic_store: 5390 case AtomicExpr::AO__atomic_store_n: 5391 Form = Copy; 5392 break; 5393 5394 case AtomicExpr::AO__c11_atomic_fetch_add: 5395 case AtomicExpr::AO__c11_atomic_fetch_sub: 5396 case AtomicExpr::AO__opencl_atomic_fetch_add: 5397 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5398 case AtomicExpr::AO__atomic_fetch_add: 5399 case AtomicExpr::AO__atomic_fetch_sub: 5400 case AtomicExpr::AO__atomic_add_fetch: 5401 case AtomicExpr::AO__atomic_sub_fetch: 5402 IsAddSub = true; 5403 Form = Arithmetic; 5404 break; 5405 case AtomicExpr::AO__c11_atomic_fetch_and: 5406 case AtomicExpr::AO__c11_atomic_fetch_or: 5407 case AtomicExpr::AO__c11_atomic_fetch_xor: 5408 case AtomicExpr::AO__opencl_atomic_fetch_and: 5409 case AtomicExpr::AO__opencl_atomic_fetch_or: 5410 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5411 case AtomicExpr::AO__atomic_fetch_and: 5412 case AtomicExpr::AO__atomic_fetch_or: 5413 case AtomicExpr::AO__atomic_fetch_xor: 5414 case AtomicExpr::AO__atomic_fetch_nand: 5415 case AtomicExpr::AO__atomic_and_fetch: 5416 case 
AtomicExpr::AO__atomic_or_fetch: 5417 case AtomicExpr::AO__atomic_xor_fetch: 5418 case AtomicExpr::AO__atomic_nand_fetch: 5419 Form = Arithmetic; 5420 break; 5421 case AtomicExpr::AO__c11_atomic_fetch_min: 5422 case AtomicExpr::AO__c11_atomic_fetch_max: 5423 case AtomicExpr::AO__opencl_atomic_fetch_min: 5424 case AtomicExpr::AO__opencl_atomic_fetch_max: 5425 case AtomicExpr::AO__atomic_min_fetch: 5426 case AtomicExpr::AO__atomic_max_fetch: 5427 case AtomicExpr::AO__atomic_fetch_min: 5428 case AtomicExpr::AO__atomic_fetch_max: 5429 Form = Arithmetic; 5430 break; 5431 5432 case AtomicExpr::AO__c11_atomic_exchange: 5433 case AtomicExpr::AO__opencl_atomic_exchange: 5434 case AtomicExpr::AO__atomic_exchange_n: 5435 Form = Xchg; 5436 break; 5437 5438 case AtomicExpr::AO__atomic_exchange: 5439 Form = GNUXchg; 5440 break; 5441 5442 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5443 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5444 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5445 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5446 Form = C11CmpXchg; 5447 break; 5448 5449 case AtomicExpr::AO__atomic_compare_exchange: 5450 case AtomicExpr::AO__atomic_compare_exchange_n: 5451 Form = GNUCmpXchg; 5452 break; 5453 } 5454 5455 unsigned AdjustedNumArgs = NumArgs[Form]; 5456 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 5457 ++AdjustedNumArgs; 5458 // Check we have the right number of arguments. 5459 if (Args.size() < AdjustedNumArgs) { 5460 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5461 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5462 << ExprRange; 5463 return ExprError(); 5464 } else if (Args.size() > AdjustedNumArgs) { 5465 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5466 diag::err_typecheck_call_too_many_args) 5467 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5468 << ExprRange; 5469 return ExprError(); 5470 } 5471 5472 // Inspect the first argument of the atomic operation. 5473 Expr *Ptr = Args[0]; 5474 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5475 if (ConvertedPtr.isInvalid()) 5476 return ExprError(); 5477 5478 Ptr = ConvertedPtr.get(); 5479 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5480 if (!pointerType) { 5481 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5482 << Ptr->getType() << Ptr->getSourceRange(); 5483 return ExprError(); 5484 } 5485 5486 // For a __c11 builtin, this should be a pointer to an _Atomic type. 5487 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5488 QualType ValType = AtomTy; // 'C' 5489 if (IsC11) { 5490 if (!AtomTy->isAtomicType()) { 5491 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5492 << Ptr->getType() << Ptr->getSourceRange(); 5493 return ExprError(); 5494 } 5495 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5496 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5497 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5498 << (AtomTy.isConstQualified() ? 
0 : 1) << Ptr->getType() 5499 << Ptr->getSourceRange(); 5500 return ExprError(); 5501 } 5502 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5503 } else if (Form != Load && Form != LoadCopy) { 5504 if (ValType.isConstQualified()) { 5505 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5506 << Ptr->getType() << Ptr->getSourceRange(); 5507 return ExprError(); 5508 } 5509 } 5510 5511 // For an arithmetic operation, the implied arithmetic must be well-formed. 5512 if (Form == Arithmetic) { 5513 // gcc does not enforce these rules for GNU atomics, but we do so for 5514 // sanity. 5515 auto IsAllowedValueType = [&](QualType ValType) { 5516 if (ValType->isIntegerType()) 5517 return true; 5518 if (ValType->isPointerType()) 5519 return true; 5520 if (!ValType->isFloatingType()) 5521 return false; 5522 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5523 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5524 &Context.getTargetInfo().getLongDoubleFormat() == 5525 &llvm::APFloat::x87DoubleExtended()) 5526 return false; 5527 return true; 5528 }; 5529 if (IsAddSub && !IsAllowedValueType(ValType)) { 5530 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5531 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5532 return ExprError(); 5533 } 5534 if (!IsAddSub && !ValType->isIntegerType()) { 5535 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5536 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5537 return ExprError(); 5538 } 5539 if (IsC11 && ValType->isPointerType() && 5540 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5541 diag::err_incomplete_type)) { 5542 return ExprError(); 5543 } 5544 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5545 // For __atomic_*_n operations, the value type must be a scalar integral or 5546 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5547 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5548 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5549 return ExprError(); 5550 } 5551 5552 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5553 !AtomTy->isScalarType()) { 5554 // For GNU atomics, require a trivially-copyable type. This is not part of 5555 // the GNU atomics specification, but we enforce it for sanity. 5556 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5557 << Ptr->getType() << Ptr->getSourceRange(); 5558 return ExprError(); 5559 } 5560 5561 switch (ValType.getObjCLifetime()) { 5562 case Qualifiers::OCL_None: 5563 case Qualifiers::OCL_ExplicitNone: 5564 // okay 5565 break; 5566 5567 case Qualifiers::OCL_Weak: 5568 case Qualifiers::OCL_Strong: 5569 case Qualifiers::OCL_Autoreleasing: 5570 // FIXME: Can this happen? By this point, ValType should be known 5571 // to be trivially copyable. 5572 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5573 << ValType << Ptr->getSourceRange(); 5574 return ExprError(); 5575 } 5576 5577 // All atomic operations have an overload which takes a pointer to a volatile 5578 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5579 // into the result or the other operands. Similarly atomic_load takes a 5580 // pointer to a const 'A'. 
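// For illustration only (a hedged example): a load through a
// 'volatile _Atomic(int) *' should produce a plain 'int' result rather than
// a 'volatile int', which is why the local qualifiers are stripped below.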
5581 ValType.removeLocalVolatile(); 5582 ValType.removeLocalConst(); 5583 QualType ResultType = ValType; 5584 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5585 Form == Init) 5586 ResultType = Context.VoidTy; 5587 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5588 ResultType = Context.BoolTy; 5589 5590 // The type of a parameter passed 'by value'. In the GNU atomics, such 5591 // arguments are actually passed as pointers. 5592 QualType ByValType = ValType; // 'CP' 5593 bool IsPassedByAddress = false; 5594 if (!IsC11 && !IsN) { 5595 ByValType = Ptr->getType(); 5596 IsPassedByAddress = true; 5597 } 5598 5599 SmallVector<Expr *, 5> APIOrderedArgs; 5600 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5601 APIOrderedArgs.push_back(Args[0]); 5602 switch (Form) { 5603 case Init: 5604 case Load: 5605 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5606 break; 5607 case LoadCopy: 5608 case Copy: 5609 case Arithmetic: 5610 case Xchg: 5611 APIOrderedArgs.push_back(Args[2]); // Val1 5612 APIOrderedArgs.push_back(Args[1]); // Order 5613 break; 5614 case GNUXchg: 5615 APIOrderedArgs.push_back(Args[2]); // Val1 5616 APIOrderedArgs.push_back(Args[3]); // Val2 5617 APIOrderedArgs.push_back(Args[1]); // Order 5618 break; 5619 case C11CmpXchg: 5620 APIOrderedArgs.push_back(Args[2]); // Val1 5621 APIOrderedArgs.push_back(Args[4]); // Val2 5622 APIOrderedArgs.push_back(Args[1]); // Order 5623 APIOrderedArgs.push_back(Args[3]); // OrderFail 5624 break; 5625 case GNUCmpXchg: 5626 APIOrderedArgs.push_back(Args[2]); // Val1 5627 APIOrderedArgs.push_back(Args[4]); // Val2 5628 APIOrderedArgs.push_back(Args[5]); // Weak 5629 APIOrderedArgs.push_back(Args[1]); // Order 5630 APIOrderedArgs.push_back(Args[3]); // OrderFail 5631 break; 5632 } 5633 } else 5634 APIOrderedArgs.append(Args.begin(), Args.end()); 5635 5636 // The first argument's non-CV pointer type is used to deduce the type of 5637 // subsequent arguments, except for: 5638 // - weak flag (always converted to bool) 5639 // - memory order (always converted to int) 5640 // - scope (always converted to int) 5641 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5642 QualType Ty; 5643 if (i < NumVals[Form] + 1) { 5644 switch (i) { 5645 case 0: 5646 // The first argument is always a pointer. It has a fixed type. 5647 // It is always dereferenced, a nullptr is undefined. 5648 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5649 // Nothing else to do: we already know all we want about this pointer. 5650 continue; 5651 case 1: 5652 // The second argument is the non-atomic operand. For arithmetic, this 5653 // is always passed by value, and for a compare_exchange it is always 5654 // passed by address. For the rest, GNU uses by-address and C11 uses 5655 // by-value. 5656 assert(Form != Load); 5657 if (Form == Arithmetic && ValType->isPointerType()) 5658 Ty = Context.getPointerDiffType(); 5659 else if (Form == Init || Form == Arithmetic) 5660 Ty = ValType; 5661 else if (Form == Copy || Form == Xchg) { 5662 if (IsPassedByAddress) { 5663 // The value pointer is always dereferenced, a nullptr is undefined. 5664 CheckNonNullArgument(*this, APIOrderedArgs[i], 5665 ExprRange.getBegin()); 5666 } 5667 Ty = ByValType; 5668 } else { 5669 Expr *ValArg = APIOrderedArgs[i]; 5670 // The value pointer is always dereferenced, a nullptr is undefined. 5671 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5672 LangAS AS = LangAS::Default; 5673 // Keep address space of non-atomic pointer type. 
5674 if (const PointerType *PtrTy = 5675 ValArg->getType()->getAs<PointerType>()) { 5676 AS = PtrTy->getPointeeType().getAddressSpace(); 5677 } 5678 Ty = Context.getPointerType( 5679 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5680 } 5681 break; 5682 case 2: 5683 // The third argument to compare_exchange / GNU exchange is the desired 5684 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5685 if (IsPassedByAddress) 5686 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5687 Ty = ByValType; 5688 break; 5689 case 3: 5690 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5691 Ty = Context.BoolTy; 5692 break; 5693 } 5694 } else { 5695 // The order(s) and scope are always converted to int. 5696 Ty = Context.IntTy; 5697 } 5698 5699 InitializedEntity Entity = 5700 InitializedEntity::InitializeParameter(Context, Ty, false); 5701 ExprResult Arg = APIOrderedArgs[i]; 5702 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5703 if (Arg.isInvalid()) 5704 return true; 5705 APIOrderedArgs[i] = Arg.get(); 5706 } 5707 5708 // Permute the arguments into a 'consistent' order. 5709 SmallVector<Expr*, 5> SubExprs; 5710 SubExprs.push_back(Ptr); 5711 switch (Form) { 5712 case Init: 5713 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5714 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5715 break; 5716 case Load: 5717 SubExprs.push_back(APIOrderedArgs[1]); // Order 5718 break; 5719 case LoadCopy: 5720 case Copy: 5721 case Arithmetic: 5722 case Xchg: 5723 SubExprs.push_back(APIOrderedArgs[2]); // Order 5724 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5725 break; 5726 case GNUXchg: 5727 // Note, AtomicExpr::getVal2() has a special case for this atomic. 5728 SubExprs.push_back(APIOrderedArgs[3]); // Order 5729 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5730 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5731 break; 5732 case C11CmpXchg: 5733 SubExprs.push_back(APIOrderedArgs[3]); // Order 5734 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5735 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5736 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5737 break; 5738 case GNUCmpXchg: 5739 SubExprs.push_back(APIOrderedArgs[4]); // Order 5740 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5741 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5742 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5743 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5744 break; 5745 } 5746 5747 if (SubExprs.size() >= 2 && Form != Init) { 5748 if (Optional<llvm::APSInt> Result = 5749 SubExprs[1]->getIntegerConstantExpr(Context)) 5750 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5751 Diag(SubExprs[1]->getBeginLoc(), 5752 diag::warn_atomic_op_has_invalid_memory_order) 5753 << SubExprs[1]->getSourceRange(); 5754 } 5755 5756 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5757 auto *Scope = Args[Args.size() - 1]; 5758 if (Optional<llvm::APSInt> Result = 5759 Scope->getIntegerConstantExpr(Context)) { 5760 if (!ScopeModel->isValid(Result->getZExtValue())) 5761 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5762 << Scope->getSourceRange(); 5763 } 5764 SubExprs.push_back(Scope); 5765 } 5766 5767 AtomicExpr *AE = new (Context) 5768 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5769 5770 if ((Op == AtomicExpr::AO__c11_atomic_load || 5771 Op == AtomicExpr::AO__c11_atomic_store || 5772 Op == AtomicExpr::AO__opencl_atomic_load || 5773 Op == 
AtomicExpr::AO__opencl_atomic_store ) && 5774 Context.AtomicUsesUnsupportedLibcall(AE)) 5775 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5776 << ((Op == AtomicExpr::AO__c11_atomic_load || 5777 Op == AtomicExpr::AO__opencl_atomic_load) 5778 ? 0 5779 : 1); 5780 5781 if (ValType->isExtIntType()) { 5782 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5783 return ExprError(); 5784 } 5785 5786 return AE; 5787 } 5788 5789 /// checkBuiltinArgument - Given a call to a builtin function, perform 5790 /// normal type-checking on the given argument, updating the call in 5791 /// place. This is useful when a builtin function requires custom 5792 /// type-checking for some of its arguments but not necessarily all of 5793 /// them. 5794 /// 5795 /// Returns true on error. 5796 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5797 FunctionDecl *Fn = E->getDirectCallee(); 5798 assert(Fn && "builtin call without direct callee!"); 5799 5800 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5801 InitializedEntity Entity = 5802 InitializedEntity::InitializeParameter(S.Context, Param); 5803 5804 ExprResult Arg = E->getArg(0); 5805 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5806 if (Arg.isInvalid()) 5807 return true; 5808 5809 E->setArg(ArgIndex, Arg.get()); 5810 return false; 5811 } 5812 5813 /// We have a call to a function like __sync_fetch_and_add, which is an 5814 /// overloaded function based on the pointer type of its first argument. 5815 /// The main BuildCallExpr routines have already promoted the types of 5816 /// arguments because all of these calls are prototyped as void(...). 5817 /// 5818 /// This function goes through and does final semantic checking for these 5819 /// builtins, as well as generating any warnings. 5820 ExprResult 5821 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5822 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5823 Expr *Callee = TheCall->getCallee(); 5824 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5825 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5826 5827 // Ensure that we have at least one argument to do type inference from. 5828 if (TheCall->getNumArgs() < 1) { 5829 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5830 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5831 return ExprError(); 5832 } 5833 5834 // Inspect the first argument of the atomic builtin. This should always be 5835 // a pointer type, whose element is an integral scalar or pointer type. 5836 // Because it is a pointer type, we don't have to worry about any implicit 5837 // casts here. 5838 // FIXME: We don't allow floating point scalars as input. 
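// Illustrative sketch (not from the original source; names are hypothetical):
// given 'int Counter;', a call such as
//   __sync_fetch_and_add(&Counter, 1);
// arrives here as the generic builtin; the value type is deduced from the
// 'int *' first argument and the call is rebound to the size-specific
// __sync_fetch_and_add_4 on targets where sizeof(int) == 4.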
5839 Expr *FirstArg = TheCall->getArg(0); 5840 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5841 if (FirstArgResult.isInvalid()) 5842 return ExprError(); 5843 FirstArg = FirstArgResult.get(); 5844 TheCall->setArg(0, FirstArg); 5845 5846 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5847 if (!pointerType) { 5848 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5849 << FirstArg->getType() << FirstArg->getSourceRange(); 5850 return ExprError(); 5851 } 5852 5853 QualType ValType = pointerType->getPointeeType(); 5854 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5855 !ValType->isBlockPointerType()) { 5856 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5857 << FirstArg->getType() << FirstArg->getSourceRange(); 5858 return ExprError(); 5859 } 5860 5861 if (ValType.isConstQualified()) { 5862 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5863 << FirstArg->getType() << FirstArg->getSourceRange(); 5864 return ExprError(); 5865 } 5866 5867 switch (ValType.getObjCLifetime()) { 5868 case Qualifiers::OCL_None: 5869 case Qualifiers::OCL_ExplicitNone: 5870 // okay 5871 break; 5872 5873 case Qualifiers::OCL_Weak: 5874 case Qualifiers::OCL_Strong: 5875 case Qualifiers::OCL_Autoreleasing: 5876 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5877 << ValType << FirstArg->getSourceRange(); 5878 return ExprError(); 5879 } 5880 5881 // Strip any qualifiers off ValType. 5882 ValType = ValType.getUnqualifiedType(); 5883 5884 // The majority of builtins return a value, but a few have special return 5885 // types, so allow them to override appropriately below. 5886 QualType ResultType = ValType; 5887 5888 // We need to figure out which concrete builtin this maps onto. For example, 5889 // __sync_fetch_and_add with a 2 byte object turns into 5890 // __sync_fetch_and_add_2. 5891 #define BUILTIN_ROW(x) \ 5892 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5893 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5894 5895 static const unsigned BuiltinIndices[][5] = { 5896 BUILTIN_ROW(__sync_fetch_and_add), 5897 BUILTIN_ROW(__sync_fetch_and_sub), 5898 BUILTIN_ROW(__sync_fetch_and_or), 5899 BUILTIN_ROW(__sync_fetch_and_and), 5900 BUILTIN_ROW(__sync_fetch_and_xor), 5901 BUILTIN_ROW(__sync_fetch_and_nand), 5902 5903 BUILTIN_ROW(__sync_add_and_fetch), 5904 BUILTIN_ROW(__sync_sub_and_fetch), 5905 BUILTIN_ROW(__sync_and_and_fetch), 5906 BUILTIN_ROW(__sync_or_and_fetch), 5907 BUILTIN_ROW(__sync_xor_and_fetch), 5908 BUILTIN_ROW(__sync_nand_and_fetch), 5909 5910 BUILTIN_ROW(__sync_val_compare_and_swap), 5911 BUILTIN_ROW(__sync_bool_compare_and_swap), 5912 BUILTIN_ROW(__sync_lock_test_and_set), 5913 BUILTIN_ROW(__sync_lock_release), 5914 BUILTIN_ROW(__sync_swap) 5915 }; 5916 #undef BUILTIN_ROW 5917 5918 // Determine the index of the size. 5919 unsigned SizeIndex; 5920 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5921 case 1: SizeIndex = 0; break; 5922 case 2: SizeIndex = 1; break; 5923 case 4: SizeIndex = 2; break; 5924 case 8: SizeIndex = 3; break; 5925 case 16: SizeIndex = 4; break; 5926 default: 5927 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5928 << FirstArg->getType() << FirstArg->getSourceRange(); 5929 return ExprError(); 5930 } 5931 5932 // Each of these builtins has one pointer argument, followed by some number of 5933 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 5934 // that we ignore. 
Find out which row of BuiltinIndices to read from as well 5935 // as the number of fixed args. 5936 unsigned BuiltinID = FDecl->getBuiltinID(); 5937 unsigned BuiltinIndex, NumFixed = 1; 5938 bool WarnAboutSemanticsChange = false; 5939 switch (BuiltinID) { 5940 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5941 case Builtin::BI__sync_fetch_and_add: 5942 case Builtin::BI__sync_fetch_and_add_1: 5943 case Builtin::BI__sync_fetch_and_add_2: 5944 case Builtin::BI__sync_fetch_and_add_4: 5945 case Builtin::BI__sync_fetch_and_add_8: 5946 case Builtin::BI__sync_fetch_and_add_16: 5947 BuiltinIndex = 0; 5948 break; 5949 5950 case Builtin::BI__sync_fetch_and_sub: 5951 case Builtin::BI__sync_fetch_and_sub_1: 5952 case Builtin::BI__sync_fetch_and_sub_2: 5953 case Builtin::BI__sync_fetch_and_sub_4: 5954 case Builtin::BI__sync_fetch_and_sub_8: 5955 case Builtin::BI__sync_fetch_and_sub_16: 5956 BuiltinIndex = 1; 5957 break; 5958 5959 case Builtin::BI__sync_fetch_and_or: 5960 case Builtin::BI__sync_fetch_and_or_1: 5961 case Builtin::BI__sync_fetch_and_or_2: 5962 case Builtin::BI__sync_fetch_and_or_4: 5963 case Builtin::BI__sync_fetch_and_or_8: 5964 case Builtin::BI__sync_fetch_and_or_16: 5965 BuiltinIndex = 2; 5966 break; 5967 5968 case Builtin::BI__sync_fetch_and_and: 5969 case Builtin::BI__sync_fetch_and_and_1: 5970 case Builtin::BI__sync_fetch_and_and_2: 5971 case Builtin::BI__sync_fetch_and_and_4: 5972 case Builtin::BI__sync_fetch_and_and_8: 5973 case Builtin::BI__sync_fetch_and_and_16: 5974 BuiltinIndex = 3; 5975 break; 5976 5977 case Builtin::BI__sync_fetch_and_xor: 5978 case Builtin::BI__sync_fetch_and_xor_1: 5979 case Builtin::BI__sync_fetch_and_xor_2: 5980 case Builtin::BI__sync_fetch_and_xor_4: 5981 case Builtin::BI__sync_fetch_and_xor_8: 5982 case Builtin::BI__sync_fetch_and_xor_16: 5983 BuiltinIndex = 4; 5984 break; 5985 5986 case Builtin::BI__sync_fetch_and_nand: 5987 case Builtin::BI__sync_fetch_and_nand_1: 5988 case Builtin::BI__sync_fetch_and_nand_2: 5989 case Builtin::BI__sync_fetch_and_nand_4: 5990 case Builtin::BI__sync_fetch_and_nand_8: 5991 case Builtin::BI__sync_fetch_and_nand_16: 5992 BuiltinIndex = 5; 5993 WarnAboutSemanticsChange = true; 5994 break; 5995 5996 case Builtin::BI__sync_add_and_fetch: 5997 case Builtin::BI__sync_add_and_fetch_1: 5998 case Builtin::BI__sync_add_and_fetch_2: 5999 case Builtin::BI__sync_add_and_fetch_4: 6000 case Builtin::BI__sync_add_and_fetch_8: 6001 case Builtin::BI__sync_add_and_fetch_16: 6002 BuiltinIndex = 6; 6003 break; 6004 6005 case Builtin::BI__sync_sub_and_fetch: 6006 case Builtin::BI__sync_sub_and_fetch_1: 6007 case Builtin::BI__sync_sub_and_fetch_2: 6008 case Builtin::BI__sync_sub_and_fetch_4: 6009 case Builtin::BI__sync_sub_and_fetch_8: 6010 case Builtin::BI__sync_sub_and_fetch_16: 6011 BuiltinIndex = 7; 6012 break; 6013 6014 case Builtin::BI__sync_and_and_fetch: 6015 case Builtin::BI__sync_and_and_fetch_1: 6016 case Builtin::BI__sync_and_and_fetch_2: 6017 case Builtin::BI__sync_and_and_fetch_4: 6018 case Builtin::BI__sync_and_and_fetch_8: 6019 case Builtin::BI__sync_and_and_fetch_16: 6020 BuiltinIndex = 8; 6021 break; 6022 6023 case Builtin::BI__sync_or_and_fetch: 6024 case Builtin::BI__sync_or_and_fetch_1: 6025 case Builtin::BI__sync_or_and_fetch_2: 6026 case Builtin::BI__sync_or_and_fetch_4: 6027 case Builtin::BI__sync_or_and_fetch_8: 6028 case Builtin::BI__sync_or_and_fetch_16: 6029 BuiltinIndex = 9; 6030 break; 6031 6032 case Builtin::BI__sync_xor_and_fetch: 6033 case Builtin::BI__sync_xor_and_fetch_1: 6034 case 
Builtin::BI__sync_xor_and_fetch_2: 6035 case Builtin::BI__sync_xor_and_fetch_4: 6036 case Builtin::BI__sync_xor_and_fetch_8: 6037 case Builtin::BI__sync_xor_and_fetch_16: 6038 BuiltinIndex = 10; 6039 break; 6040 6041 case Builtin::BI__sync_nand_and_fetch: 6042 case Builtin::BI__sync_nand_and_fetch_1: 6043 case Builtin::BI__sync_nand_and_fetch_2: 6044 case Builtin::BI__sync_nand_and_fetch_4: 6045 case Builtin::BI__sync_nand_and_fetch_8: 6046 case Builtin::BI__sync_nand_and_fetch_16: 6047 BuiltinIndex = 11; 6048 WarnAboutSemanticsChange = true; 6049 break; 6050 6051 case Builtin::BI__sync_val_compare_and_swap: 6052 case Builtin::BI__sync_val_compare_and_swap_1: 6053 case Builtin::BI__sync_val_compare_and_swap_2: 6054 case Builtin::BI__sync_val_compare_and_swap_4: 6055 case Builtin::BI__sync_val_compare_and_swap_8: 6056 case Builtin::BI__sync_val_compare_and_swap_16: 6057 BuiltinIndex = 12; 6058 NumFixed = 2; 6059 break; 6060 6061 case Builtin::BI__sync_bool_compare_and_swap: 6062 case Builtin::BI__sync_bool_compare_and_swap_1: 6063 case Builtin::BI__sync_bool_compare_and_swap_2: 6064 case Builtin::BI__sync_bool_compare_and_swap_4: 6065 case Builtin::BI__sync_bool_compare_and_swap_8: 6066 case Builtin::BI__sync_bool_compare_and_swap_16: 6067 BuiltinIndex = 13; 6068 NumFixed = 2; 6069 ResultType = Context.BoolTy; 6070 break; 6071 6072 case Builtin::BI__sync_lock_test_and_set: 6073 case Builtin::BI__sync_lock_test_and_set_1: 6074 case Builtin::BI__sync_lock_test_and_set_2: 6075 case Builtin::BI__sync_lock_test_and_set_4: 6076 case Builtin::BI__sync_lock_test_and_set_8: 6077 case Builtin::BI__sync_lock_test_and_set_16: 6078 BuiltinIndex = 14; 6079 break; 6080 6081 case Builtin::BI__sync_lock_release: 6082 case Builtin::BI__sync_lock_release_1: 6083 case Builtin::BI__sync_lock_release_2: 6084 case Builtin::BI__sync_lock_release_4: 6085 case Builtin::BI__sync_lock_release_8: 6086 case Builtin::BI__sync_lock_release_16: 6087 BuiltinIndex = 15; 6088 NumFixed = 0; 6089 ResultType = Context.VoidTy; 6090 break; 6091 6092 case Builtin::BI__sync_swap: 6093 case Builtin::BI__sync_swap_1: 6094 case Builtin::BI__sync_swap_2: 6095 case Builtin::BI__sync_swap_4: 6096 case Builtin::BI__sync_swap_8: 6097 case Builtin::BI__sync_swap_16: 6098 BuiltinIndex = 16; 6099 break; 6100 } 6101 6102 // Now that we know how many fixed arguments we expect, first check that we 6103 // have at least that many. 6104 if (TheCall->getNumArgs() < 1+NumFixed) { 6105 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6106 << 0 << 1 + NumFixed << TheCall->getNumArgs() 6107 << Callee->getSourceRange(); 6108 return ExprError(); 6109 } 6110 6111 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 6112 << Callee->getSourceRange(); 6113 6114 if (WarnAboutSemanticsChange) { 6115 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 6116 << Callee->getSourceRange(); 6117 } 6118 6119 // Get the decl for the concrete builtin from this, we can tell what the 6120 // concrete integer type we should convert to is. 6121 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 6122 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 6123 FunctionDecl *NewBuiltinDecl; 6124 if (NewBuiltinID == BuiltinID) 6125 NewBuiltinDecl = FDecl; 6126 else { 6127 // Perform builtin lookup to avoid redeclaring it. 
6128 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 6129 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 6130 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 6131 assert(Res.getFoundDecl()); 6132 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 6133 if (!NewBuiltinDecl) 6134 return ExprError(); 6135 } 6136 6137 // The first argument --- the pointer --- has a fixed type; we 6138 // deduce the types of the rest of the arguments accordingly. Walk 6139 // the remaining arguments, converting them to the deduced value type. 6140 for (unsigned i = 0; i != NumFixed; ++i) { 6141 ExprResult Arg = TheCall->getArg(i+1); 6142 6143 // GCC does an implicit conversion to the pointer or integer ValType. This 6144 // can fail in some cases (1i -> int**), check for this error case now. 6145 // Initialize the argument. 6146 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6147 ValType, /*consume*/ false); 6148 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6149 if (Arg.isInvalid()) 6150 return ExprError(); 6151 6152 // Okay, we have something that *can* be converted to the right type. Check 6153 // to see if there is a potentially weird extension going on here. This can 6154 // happen when you do an atomic operation on something like a char* and 6155 // pass in 42. The 42 gets converted to char. This is even more strange 6156 // for things like 45.123 -> char, etc. 6157 // FIXME: Do this check. 6158 TheCall->setArg(i+1, Arg.get()); 6159 } 6160 6161 // Create a new DeclRefExpr to refer to the new decl. 6162 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6163 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6164 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6165 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6166 6167 // Set the callee in the CallExpr. 6168 // FIXME: This loses syntactic information. 6169 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6170 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6171 CK_BuiltinFnToFnPtr); 6172 TheCall->setCallee(PromotedCall.get()); 6173 6174 // Change the result type of the call to match the original value type. This 6175 // is arbitrary, but the codegen for these builtins is designed to handle it 6176 // gracefully. 6177 TheCall->setType(ResultType); 6178 6179 // Prohibit use of _ExtInt with atomic builtins. 6180 // The arguments would have already been converted to the first argument's 6181 // type, so we only need to check the first argument. 6182 const auto *ExtIntValType = ValType->getAs<ExtIntType>(); 6183 if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) { 6184 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6185 return ExprError(); 6186 } 6187 6188 return TheCallResult; 6189 } 6190 6191 /// SemaBuiltinNontemporalOverloaded - We have a call to 6192 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6193 /// overloaded function based on the pointer type of its last argument. 6194 /// 6195 /// This function goes through and does final semantic checking for these 6196 /// builtins.
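///
/// For example (an illustrative sketch; the variable names are hypothetical):
///   float V, *P;
///   __builtin_nontemporal_store(V, P);       // access type deduced as 'float'
///   float W = __builtin_nontemporal_load(P); // result type deduced as 'float'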
6197 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6198 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6199 DeclRefExpr *DRE = 6200 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6201 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6202 unsigned BuiltinID = FDecl->getBuiltinID(); 6203 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6204 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6205 "Unexpected nontemporal load/store builtin!"); 6206 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6207 unsigned numArgs = isStore ? 2 : 1; 6208 6209 // Ensure that we have the proper number of arguments. 6210 if (checkArgCount(*this, TheCall, numArgs)) 6211 return ExprError(); 6212 6213 // Inspect the last argument of the nontemporal builtin. This should always 6214 // be a pointer type, from which we imply the type of the memory access. 6215 // Because it is a pointer type, we don't have to worry about any implicit 6216 // casts here. 6217 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6218 ExprResult PointerArgResult = 6219 DefaultFunctionArrayLvalueConversion(PointerArg); 6220 6221 if (PointerArgResult.isInvalid()) 6222 return ExprError(); 6223 PointerArg = PointerArgResult.get(); 6224 TheCall->setArg(numArgs - 1, PointerArg); 6225 6226 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6227 if (!pointerType) { 6228 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6229 << PointerArg->getType() << PointerArg->getSourceRange(); 6230 return ExprError(); 6231 } 6232 6233 QualType ValType = pointerType->getPointeeType(); 6234 6235 // Strip any qualifiers off ValType. 6236 ValType = ValType.getUnqualifiedType(); 6237 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6238 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6239 !ValType->isVectorType()) { 6240 Diag(DRE->getBeginLoc(), 6241 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6242 << PointerArg->getType() << PointerArg->getSourceRange(); 6243 return ExprError(); 6244 } 6245 6246 if (!isStore) { 6247 TheCall->setType(ValType); 6248 return TheCallResult; 6249 } 6250 6251 ExprResult ValArg = TheCall->getArg(0); 6252 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6253 Context, ValType, /*consume*/ false); 6254 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6255 if (ValArg.isInvalid()) 6256 return ExprError(); 6257 6258 TheCall->setArg(0, ValArg.get()); 6259 TheCall->setType(Context.VoidTy); 6260 return TheCallResult; 6261 } 6262 6263 /// CheckObjCString - Checks that the argument to the builtin 6264 /// CFString constructor is correct 6265 /// Note: It might also make sense to do the UTF-16 conversion here (would 6266 /// simplify the backend). 
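///
/// For example (an illustrative sketch, not from the original source; the
/// argument must be an ordinary, non-wide string literal):
///   CFStringRef S = __builtin___CFStringMakeConstantString("plain"); // OK
///   CFStringRef T = __builtin___CFStringMakeConstantString(L"wide"); // rejected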
6267 bool Sema::CheckObjCString(Expr *Arg) { 6268 Arg = Arg->IgnoreParenCasts(); 6269 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6270 6271 if (!Literal || !Literal->isAscii()) { 6272 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6273 << Arg->getSourceRange(); 6274 return true; 6275 } 6276 6277 if (Literal->containsNonAsciiOrNull()) { 6278 StringRef String = Literal->getString(); 6279 unsigned NumBytes = String.size(); 6280 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6281 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6282 llvm::UTF16 *ToPtr = &ToBuf[0]; 6283 6284 llvm::ConversionResult Result = 6285 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6286 ToPtr + NumBytes, llvm::strictConversion); 6287 // Check for conversion failure. 6288 if (Result != llvm::conversionOK) 6289 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6290 << Arg->getSourceRange(); 6291 } 6292 return false; 6293 } 6294 6295 /// CheckOSLogFormatStringArg - Checks that the format string argument to the 6296 /// os_log() and os_trace() functions is correct, and converts it to const char *. 6297 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6298 Arg = Arg->IgnoreParenCasts(); 6299 auto *Literal = dyn_cast<StringLiteral>(Arg); 6300 if (!Literal) { 6301 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6302 Literal = ObjcLiteral->getString(); 6303 } 6304 } 6305 6306 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6307 return ExprError( 6308 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6309 << Arg->getSourceRange()); 6310 } 6311 6312 ExprResult Result(Literal); 6313 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6314 InitializedEntity Entity = 6315 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6316 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6317 return Result; 6318 } 6319 6320 /// Check that the user is calling the appropriate va_start builtin for the 6321 /// target and calling convention. 6322 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6323 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6324 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6325 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6326 TT.getArch() == llvm::Triple::aarch64_32); 6327 bool IsWindows = TT.isOSWindows(); 6328 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6329 if (IsX64 || IsAArch64) { 6330 CallingConv CC = CC_C; 6331 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 6332 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 6333 if (IsMSVAStart) { 6334 // Don't allow this in System V ABI functions. 6335 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 6336 return S.Diag(Fn->getBeginLoc(), 6337 diag::err_ms_va_start_used_in_sysv_function); 6338 } else { 6339 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 6340 // On x64 Windows, don't allow this in System V ABI functions. 6341 // (Yes, that means there's no corresponding way to support variadic 6342 // System V ABI functions on Windows.)
6343 if ((IsWindows && CC == CC_X86_64SysV) || 6344 (!IsWindows && CC == CC_Win64)) 6345 return S.Diag(Fn->getBeginLoc(), 6346 diag::err_va_start_used_in_wrong_abi_function) 6347 << !IsWindows; 6348 } 6349 return false; 6350 } 6351 6352 if (IsMSVAStart) 6353 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 6354 return false; 6355 } 6356 6357 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 6358 ParmVarDecl **LastParam = nullptr) { 6359 // Determine whether the current function, block, or obj-c method is variadic 6360 // and get its parameter list. 6361 bool IsVariadic = false; 6362 ArrayRef<ParmVarDecl *> Params; 6363 DeclContext *Caller = S.CurContext; 6364 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 6365 IsVariadic = Block->isVariadic(); 6366 Params = Block->parameters(); 6367 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 6368 IsVariadic = FD->isVariadic(); 6369 Params = FD->parameters(); 6370 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 6371 IsVariadic = MD->isVariadic(); 6372 // FIXME: This isn't correct for methods (results in bogus warning). 6373 Params = MD->parameters(); 6374 } else if (isa<CapturedDecl>(Caller)) { 6375 // We don't support va_start in a CapturedDecl. 6376 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 6377 return true; 6378 } else { 6379 // This must be some other declcontext that parses exprs. 6380 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 6381 return true; 6382 } 6383 6384 if (!IsVariadic) { 6385 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 6386 return true; 6387 } 6388 6389 if (LastParam) 6390 *LastParam = Params.empty() ? nullptr : Params.back(); 6391 6392 return false; 6393 } 6394 6395 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 6396 /// for validity. Emit an error and return true on failure; return false 6397 /// on success. 6398 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 6399 Expr *Fn = TheCall->getCallee(); 6400 6401 if (checkVAStartABI(*this, BuiltinID, Fn)) 6402 return true; 6403 6404 if (checkArgCount(*this, TheCall, 2)) 6405 return true; 6406 6407 // Type-check the first argument normally. 6408 if (checkBuiltinArgument(*this, TheCall, 0)) 6409 return true; 6410 6411 // Check that the current function is variadic, and get its last parameter. 6412 ParmVarDecl *LastParam; 6413 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 6414 return true; 6415 6416 // Verify that the second argument to the builtin is the last argument of the 6417 // current function or method. 6418 bool SecondArgIsLastNamedArgument = false; 6419 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 6420 6421 // These are valid if SecondArgIsLastNamedArgument is false after the next 6422 // block. 
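// Illustrative sketch (hypothetical user code, not from the original source)
// of what this check diagnoses:
//   void f(int a, int b, ...) {
//     va_list ap;
//     va_start(ap, a); // warned: 'a' is not the last named parameter
//   }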
6423 QualType Type; 6424 SourceLocation ParamLoc; 6425 bool IsCRegister = false; 6426 6427 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6428 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6429 SecondArgIsLastNamedArgument = PV == LastParam; 6430 6431 Type = PV->getType(); 6432 ParamLoc = PV->getLocation(); 6433 IsCRegister = 6434 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6435 } 6436 } 6437 6438 if (!SecondArgIsLastNamedArgument) 6439 Diag(TheCall->getArg(1)->getBeginLoc(), 6440 diag::warn_second_arg_of_va_start_not_last_named_param); 6441 else if (IsCRegister || Type->isReferenceType() || 6442 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6443 // Promotable integers are UB, but enumerations need a bit of 6444 // extra checking to see what their promotable type actually is. 6445 if (!Type->isPromotableIntegerType()) 6446 return false; 6447 if (!Type->isEnumeralType()) 6448 return true; 6449 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6450 return !(ED && 6451 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6452 }()) { 6453 unsigned Reason = 0; 6454 if (Type->isReferenceType()) Reason = 1; 6455 else if (IsCRegister) Reason = 2; 6456 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6457 Diag(ParamLoc, diag::note_parameter_type) << Type; 6458 } 6459 6460 TheCall->setType(Context.VoidTy); 6461 return false; 6462 } 6463 6464 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6465 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool { 6466 const LangOptions &LO = getLangOpts(); 6467 6468 if (LO.CPlusPlus) 6469 return Arg->getType() 6470 .getCanonicalType() 6471 .getTypePtr() 6472 ->getPointeeType() 6473 .withoutLocalFastQualifiers() == Context.CharTy; 6474 6475 // In C, allow aliasing through `char *`, this is required for AArch64 at 6476 // least. 6477 return true; 6478 }; 6479 6480 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6481 // const char *named_addr); 6482 6483 Expr *Func = Call->getCallee(); 6484 6485 if (Call->getNumArgs() < 3) 6486 return Diag(Call->getEndLoc(), 6487 diag::err_typecheck_call_too_few_args_at_least) 6488 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6489 6490 // Type-check the first argument normally. 6491 if (checkBuiltinArgument(*this, Call, 0)) 6492 return true; 6493 6494 // Check that the current function is variadic. 
6495 if (checkVAStartIsInVariadicFunction(*this, Func)) 6496 return true; 6497 6498 // __va_start on Windows does not validate the parameter qualifiers. 6499 6500 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 6501 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 6502 6503 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 6504 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 6505 6506 const QualType &ConstCharPtrTy = 6507 Context.getPointerType(Context.CharTy.withConst()); 6508 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1)) 6509 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6510 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 6511 << 0 /* qualifier difference */ 6512 << 3 /* parameter mismatch */ 6513 << 2 << Arg1->getType() << ConstCharPtrTy; 6514 6515 const QualType SizeTy = Context.getSizeType(); 6516 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 6517 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6518 << Arg2->getType() << SizeTy << 1 /* different class */ 6519 << 0 /* qualifier difference */ 6520 << 3 /* parameter mismatch */ 6521 << 3 << Arg2->getType() << SizeTy; 6522 6523 return false; 6524 } 6525 6526 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 6527 /// friends. This is declared to take (...), so we have to check everything. 6528 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 6529 if (checkArgCount(*this, TheCall, 2)) 6530 return true; 6531 6532 ExprResult OrigArg0 = TheCall->getArg(0); 6533 ExprResult OrigArg1 = TheCall->getArg(1); 6534 6535 // Do standard promotions between the two arguments, returning their common 6536 // type. 6537 QualType Res = UsualArithmeticConversions( 6538 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 6539 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 6540 return true; 6541 6542 // Make sure any conversions are pushed back into the call; this is 6543 // type safe since unordered compare builtins are declared as "_Bool 6544 // foo(...)". 6545 TheCall->setArg(0, OrigArg0.get()); 6546 TheCall->setArg(1, OrigArg1.get()); 6547 6548 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 6549 return false; 6550 6551 // If the common type isn't a real floating type, then the arguments were 6552 // invalid for this operation. 6553 if (Res.isNull() || !Res->isRealFloatingType()) 6554 return Diag(OrigArg0.get()->getBeginLoc(), 6555 diag::err_typecheck_call_invalid_ordered_compare) 6556 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 6557 << SourceRange(OrigArg0.get()->getBeginLoc(), 6558 OrigArg1.get()->getEndLoc()); 6559 6560 return false; 6561 } 6562 6563 /// SemaBuiltinFPClassification - Handle functions like 6564 /// __builtin_isnan and friends. This is declared to take (...), so we have 6565 /// to check everything. We expect the last argument to be a floating point 6566 /// value. 6567 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 6568 if (checkArgCount(*this, TheCall, NumArgs)) 6569 return true; 6570 6571 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 6572 // on all preceding parameters just being int. Try all of those.
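// Illustrative sketch (hypothetical user code): for
//   int k = __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
//                                FP_SUBNORMAL, FP_ZERO, x);
// the first five arguments are converted to int here, and the final 'x' must
// be a real floating-point value (checked further below).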
6573 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6574 Expr *Arg = TheCall->getArg(i); 6575 6576 if (Arg->isTypeDependent()) 6577 return false; 6578 6579 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6580 6581 if (Res.isInvalid()) 6582 return true; 6583 TheCall->setArg(i, Res.get()); 6584 } 6585 6586 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6587 6588 if (OrigArg->isTypeDependent()) 6589 return false; 6590 6591 // Usual Unary Conversions will convert half to float, which we want for 6592 // machines that use fp16 conversion intrinsics. Else, we want to leave the 6593 // type as it is, but do normal L->Rvalue conversions. 6594 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6595 OrigArg = UsualUnaryConversions(OrigArg).get(); 6596 else 6597 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6598 TheCall->setArg(NumArgs - 1, OrigArg); 6599 6600 // This operation requires a non-_Complex floating-point number. 6601 if (!OrigArg->getType()->isRealFloatingType()) 6602 return Diag(OrigArg->getBeginLoc(), 6603 diag::err_typecheck_call_invalid_unary_fp) 6604 << OrigArg->getType() << OrigArg->getSourceRange(); 6605 6606 return false; 6607 } 6608 6609 /// Perform semantic analysis for a call to __builtin_complex. 6610 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6611 if (checkArgCount(*this, TheCall, 2)) 6612 return true; 6613 6614 bool Dependent = false; 6615 for (unsigned I = 0; I != 2; ++I) { 6616 Expr *Arg = TheCall->getArg(I); 6617 QualType T = Arg->getType(); 6618 if (T->isDependentType()) { 6619 Dependent = true; 6620 continue; 6621 } 6622 6623 // Despite supporting _Complex int, GCC requires a real floating point type 6624 // for the operands of __builtin_complex. 6625 if (!T->isRealFloatingType()) { 6626 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6627 << Arg->getType() << Arg->getSourceRange(); 6628 } 6629 6630 ExprResult Converted = DefaultLvalueConversion(Arg); 6631 if (Converted.isInvalid()) 6632 return true; 6633 TheCall->setArg(I, Converted.get()); 6634 } 6635 6636 if (Dependent) { 6637 TheCall->setType(Context.DependentTy); 6638 return false; 6639 } 6640 6641 Expr *Real = TheCall->getArg(0); 6642 Expr *Imag = TheCall->getArg(1); 6643 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6644 return Diag(Real->getBeginLoc(), 6645 diag::err_typecheck_call_different_arg_types) 6646 << Real->getType() << Imag->getType() 6647 << Real->getSourceRange() << Imag->getSourceRange(); 6648 } 6649 6650 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6651 // don't allow this builtin to form those types either. 6652 // FIXME: Should we allow these types? 6653 if (Real->getType()->isFloat16Type()) 6654 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6655 << "_Float16"; 6656 if (Real->getType()->isHalfType()) 6657 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6658 << "half"; 6659 6660 TheCall->setType(Context.getComplexType(Real->getType())); 6661 return false; 6662 } 6663 6664 // Customized Sema Checking for VSX builtins that have the following signature: 6665 // vector [...] builtinName(vector [...], vector [...], const int); 6666 // These take the same type of vectors (any legal vector type) for the first 6667 // two arguments and a compile-time constant for the third argument.
6668 // Example builtins are : 6669 // vector double vec_xxpermdi(vector double, vector double, int); 6670 // vector short vec_xxsldwi(vector short, vector short, int); 6671 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6672 unsigned ExpectedNumArgs = 3; 6673 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6674 return true; 6675 6676 // Check the third argument is a compile time constant 6677 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6678 return Diag(TheCall->getBeginLoc(), 6679 diag::err_vsx_builtin_nonconstant_argument) 6680 << 3 /* argument index */ << TheCall->getDirectCallee() 6681 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6682 TheCall->getArg(2)->getEndLoc()); 6683 6684 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6685 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6686 6687 // Check the type of argument 1 and argument 2 are vectors. 6688 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6689 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6690 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6691 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6692 << TheCall->getDirectCallee() 6693 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6694 TheCall->getArg(1)->getEndLoc()); 6695 } 6696 6697 // Check the first two arguments are the same type. 6698 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6699 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6700 << TheCall->getDirectCallee() 6701 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6702 TheCall->getArg(1)->getEndLoc()); 6703 } 6704 6705 // When default clang type checking is turned off and the customized type 6706 // checking is used, the returning type of the function must be explicitly 6707 // set. Otherwise it is _Bool by default. 6708 TheCall->setType(Arg1Ty); 6709 6710 return false; 6711 } 6712 6713 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6714 // This is declared to take (...), so we have to check everything. 6715 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6716 if (TheCall->getNumArgs() < 2) 6717 return ExprError(Diag(TheCall->getEndLoc(), 6718 diag::err_typecheck_call_too_few_args_at_least) 6719 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6720 << TheCall->getSourceRange()); 6721 6722 // Determine which of the following types of shufflevector we're checking: 6723 // 1) unary, vector mask: (lhs, mask) 6724 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6725 QualType resType = TheCall->getArg(0)->getType(); 6726 unsigned numElements = 0; 6727 6728 if (!TheCall->getArg(0)->isTypeDependent() && 6729 !TheCall->getArg(1)->isTypeDependent()) { 6730 QualType LHSType = TheCall->getArg(0)->getType(); 6731 QualType RHSType = TheCall->getArg(1)->getType(); 6732 6733 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6734 return ExprError( 6735 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6736 << TheCall->getDirectCallee() 6737 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6738 TheCall->getArg(1)->getEndLoc())); 6739 6740 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6741 unsigned numResElements = TheCall->getNumArgs() - 2; 6742 6743 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6744 // with mask. If so, verify that RHS is an integer vector type with the 6745 // same number of elts as lhs. 
6746 if (TheCall->getNumArgs() == 2) { 6747 if (!RHSType->hasIntegerRepresentation() || 6748 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6749 return ExprError(Diag(TheCall->getBeginLoc(), 6750 diag::err_vec_builtin_incompatible_vector) 6751 << TheCall->getDirectCallee() 6752 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6753 TheCall->getArg(1)->getEndLoc())); 6754 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6755 return ExprError(Diag(TheCall->getBeginLoc(), 6756 diag::err_vec_builtin_incompatible_vector) 6757 << TheCall->getDirectCallee() 6758 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6759 TheCall->getArg(1)->getEndLoc())); 6760 } else if (numElements != numResElements) { 6761 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6762 resType = Context.getVectorType(eltType, numResElements, 6763 VectorType::GenericVector); 6764 } 6765 } 6766 6767 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6768 if (TheCall->getArg(i)->isTypeDependent() || 6769 TheCall->getArg(i)->isValueDependent()) 6770 continue; 6771 6772 Optional<llvm::APSInt> Result; 6773 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6774 return ExprError(Diag(TheCall->getBeginLoc(), 6775 diag::err_shufflevector_nonconstant_argument) 6776 << TheCall->getArg(i)->getSourceRange()); 6777 6778 // Allow -1 which will be translated to undef in the IR. 6779 if (Result->isSigned() && Result->isAllOnesValue()) 6780 continue; 6781 6782 if (Result->getActiveBits() > 64 || 6783 Result->getZExtValue() >= numElements * 2) 6784 return ExprError(Diag(TheCall->getBeginLoc(), 6785 diag::err_shufflevector_argument_too_large) 6786 << TheCall->getArg(i)->getSourceRange()); 6787 } 6788 6789 SmallVector<Expr*, 32> exprs; 6790 6791 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 6792 exprs.push_back(TheCall->getArg(i)); 6793 TheCall->setArg(i, nullptr); 6794 } 6795 6796 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 6797 TheCall->getCallee()->getBeginLoc(), 6798 TheCall->getRParenLoc()); 6799 } 6800 6801 /// SemaConvertVectorExpr - Handle __builtin_convertvector 6802 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 6803 SourceLocation BuiltinLoc, 6804 SourceLocation RParenLoc) { 6805 ExprValueKind VK = VK_PRValue; 6806 ExprObjectKind OK = OK_Ordinary; 6807 QualType DstTy = TInfo->getType(); 6808 QualType SrcTy = E->getType(); 6809 6810 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 6811 return ExprError(Diag(BuiltinLoc, 6812 diag::err_convertvector_non_vector) 6813 << E->getSourceRange()); 6814 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 6815 return ExprError(Diag(BuiltinLoc, 6816 diag::err_convertvector_non_vector_type)); 6817 6818 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 6819 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 6820 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 6821 if (SrcElts != DstElts) 6822 return ExprError(Diag(BuiltinLoc, 6823 diag::err_convertvector_incompatible_vector) 6824 << E->getSourceRange()); 6825 } 6826 6827 return new (Context) 6828 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 6829 } 6830 6831 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 6832 // This is declared to take (const void*, ...) and can take two 6833 // optional constant int args. 
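// For example (an illustrative sketch; 'P' is hypothetical):
//   __builtin_prefetch(P);        // optional rw and locality args omitted
//   __builtin_prefetch(P, 1, 1);  // OK: rw in [0,1], locality in [0,3]
//   __builtin_prefetch(P, 2, 1);  // rejected: rw must be 0 or 1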
6834 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 6835 unsigned NumArgs = TheCall->getNumArgs(); 6836 6837 if (NumArgs > 3) 6838 return Diag(TheCall->getEndLoc(), 6839 diag::err_typecheck_call_too_many_args_at_most) 6840 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6841 6842 // Argument 0 is checked for us and the remaining arguments must be 6843 // constant integers. 6844 for (unsigned i = 1; i != NumArgs; ++i) 6845 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6846 return true; 6847 6848 return false; 6849 } 6850 6851 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 6852 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 6853 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 6854 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 6855 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6856 if (checkArgCount(*this, TheCall, 1)) 6857 return true; 6858 Expr *Arg = TheCall->getArg(0); 6859 if (Arg->isInstantiationDependent()) 6860 return false; 6861 6862 QualType ArgTy = Arg->getType(); 6863 if (!ArgTy->hasFloatingRepresentation()) 6864 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 6865 << ArgTy; 6866 if (Arg->isLValue()) { 6867 ExprResult FirstArg = DefaultLvalueConversion(Arg); 6868 TheCall->setArg(0, FirstArg.get()); 6869 } 6870 TheCall->setType(TheCall->getArg(0)->getType()); 6871 return false; 6872 } 6873 6874 /// SemaBuiltinAssume - Handle __assume (MS Extension). 6875 // __assume does not evaluate its arguments, and should warn if its argument 6876 // has side effects. 6877 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6878 Expr *Arg = TheCall->getArg(0); 6879 if (Arg->isInstantiationDependent()) return false; 6880 6881 if (Arg->HasSideEffects(Context)) 6882 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6883 << Arg->getSourceRange() 6884 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6885 6886 return false; 6887 } 6888 6889 /// Handle __builtin_alloca_with_align. This is declared 6890 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6891 /// than 8. 6892 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6893 // The alignment must be a constant integer. 6894 Expr *Arg = TheCall->getArg(1); 6895 6896 // We can't check the value of a dependent argument. 6897 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6898 if (const auto *UE = 6899 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6900 if (UE->getKind() == UETT_AlignOf || 6901 UE->getKind() == UETT_PreferredAlignOf) 6902 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6903 << Arg->getSourceRange(); 6904 6905 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6906 6907 if (!Result.isPowerOf2()) 6908 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6909 << Arg->getSourceRange(); 6910 6911 if (Result < Context.getCharWidth()) 6912 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6913 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6914 6915 if (Result > std::numeric_limits<int32_t>::max()) 6916 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6917 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6918 } 6919 6920 return false; 6921 } 6922 6923 /// Handle __builtin_assume_aligned. This is declared 6924 /// as (const void*, size_t, ...) and can take one optional constant int arg. 
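///
/// For example (an illustrative sketch; 'P' is hypothetical):
///   void *Q = __builtin_assume_aligned(P, 64);     // OK: constant power of 2
///   void *R = __builtin_assume_aligned(P, 64, 4);  // OK: optional offset
///   void *S = __builtin_assume_aligned(P, 48);     // rejected: not a power of 2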
6925 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6926 unsigned NumArgs = TheCall->getNumArgs(); 6927 6928 if (NumArgs > 3) 6929 return Diag(TheCall->getEndLoc(), 6930 diag::err_typecheck_call_too_many_args_at_most) 6931 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6932 6933 // The alignment must be a constant integer. 6934 Expr *Arg = TheCall->getArg(1); 6935 6936 // We can't check the value of a dependent argument. 6937 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6938 llvm::APSInt Result; 6939 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6940 return true; 6941 6942 if (!Result.isPowerOf2()) 6943 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6944 << Arg->getSourceRange(); 6945 6946 if (Result > Sema::MaximumAlignment) 6947 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6948 << Arg->getSourceRange() << Sema::MaximumAlignment; 6949 } 6950 6951 if (NumArgs > 2) { 6952 ExprResult Arg(TheCall->getArg(2)); 6953 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6954 Context.getSizeType(), false); 6955 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6956 if (Arg.isInvalid()) return true; 6957 TheCall->setArg(2, Arg.get()); 6958 } 6959 6960 return false; 6961 } 6962 6963 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6964 unsigned BuiltinID = 6965 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6966 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6967 6968 unsigned NumArgs = TheCall->getNumArgs(); 6969 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6970 if (NumArgs < NumRequiredArgs) { 6971 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6972 << 0 /* function call */ << NumRequiredArgs << NumArgs 6973 << TheCall->getSourceRange(); 6974 } 6975 if (NumArgs >= NumRequiredArgs + 0x100) { 6976 return Diag(TheCall->getEndLoc(), 6977 diag::err_typecheck_call_too_many_args_at_most) 6978 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6979 << TheCall->getSourceRange(); 6980 } 6981 unsigned i = 0; 6982 6983 // For formatting call, check buffer arg. 6984 if (!IsSizeCall) { 6985 ExprResult Arg(TheCall->getArg(i)); 6986 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6987 Context, Context.VoidPtrTy, false); 6988 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6989 if (Arg.isInvalid()) 6990 return true; 6991 TheCall->setArg(i, Arg.get()); 6992 i++; 6993 } 6994 6995 // Check string literal arg. 6996 unsigned FormatIdx = i; 6997 { 6998 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6999 if (Arg.isInvalid()) 7000 return true; 7001 TheCall->setArg(i, Arg.get()); 7002 i++; 7003 } 7004 7005 // Make sure variadic args are scalar. 7006 unsigned FirstDataArg = i; 7007 while (i < NumArgs) { 7008 ExprResult Arg = DefaultVariadicArgumentPromotion( 7009 TheCall->getArg(i), VariadicFunction, nullptr); 7010 if (Arg.isInvalid()) 7011 return true; 7012 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 7013 if (ArgSize.getQuantity() >= 0x100) { 7014 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 7015 << i << (int)ArgSize.getQuantity() << 0xff 7016 << TheCall->getSourceRange(); 7017 } 7018 TheCall->setArg(i, Arg.get()); 7019 i++; 7020 } 7021 7022 // Check formatting specifiers. NOTE: We're only doing this for the non-size 7023 // call to avoid duplicate diagnostics. 
7024 if (!IsSizeCall) { 7025 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 7026 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 7027 bool Success = CheckFormatArguments( 7028 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 7029 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 7030 CheckedVarArgs); 7031 if (!Success) 7032 return true; 7033 } 7034 7035 if (IsSizeCall) { 7036 TheCall->setType(Context.getSizeType()); 7037 } else { 7038 TheCall->setType(Context.VoidPtrTy); 7039 } 7040 return false; 7041 } 7042 7043 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 7044 /// TheCall is a constant expression. 7045 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 7046 llvm::APSInt &Result) { 7047 Expr *Arg = TheCall->getArg(ArgNum); 7048 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7049 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7050 7051 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 7052 7053 Optional<llvm::APSInt> R; 7054 if (!(R = Arg->getIntegerConstantExpr(Context))) 7055 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 7056 << FDecl->getDeclName() << Arg->getSourceRange(); 7057 Result = *R; 7058 return false; 7059 } 7060 7061 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 7062 /// TheCall is a constant expression in the range [Low, High]. 7063 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 7064 int Low, int High, bool RangeIsError) { 7065 if (isConstantEvaluated()) 7066 return false; 7067 llvm::APSInt Result; 7068 7069 // We can't check the value of a dependent argument. 7070 Expr *Arg = TheCall->getArg(ArgNum); 7071 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7072 return false; 7073 7074 // Check constant-ness first. 7075 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7076 return true; 7077 7078 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 7079 if (RangeIsError) 7080 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 7081 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 7082 else 7083 // Defer the warning until we know if the code will be emitted so that 7084 // dead code can ignore this. 7085 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 7086 PDiag(diag::warn_argument_invalid_range) 7087 << toString(Result, 10) << Low << High 7088 << Arg->getSourceRange()); 7089 } 7090 7091 return false; 7092 } 7093 7094 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 7095 /// TheCall is a constant expression that is a multiple of Num. 7096 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 7097 unsigned Num) { 7098 llvm::APSInt Result; 7099 7100 // We can't check the value of a dependent argument. 7101 Expr *Arg = TheCall->getArg(ArgNum); 7102 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7103 return false; 7104 7105 // Check constant-ness first. 7106 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7107 return true; 7108 7109 if (Result.getSExtValue() % Num != 0) 7110 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 7111 << Num << Arg->getSourceRange(); 7112 7113 return false; 7114 } 7115 7116 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 7117 /// constant expression representing a power of 2.
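///
/// For example (illustrative): 8 passes because 8 & 7 == 0, while 12 is
/// rejected because 12 & 11 == 8, which is nonzero.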
7118 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 7119 llvm::APSInt Result; 7120 7121 // We can't check the value of a dependent argument. 7122 Expr *Arg = TheCall->getArg(ArgNum); 7123 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7124 return false; 7125 7126 // Check constant-ness first. 7127 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7128 return true; 7129 7130 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 7131 // and only if x is a power of 2. 7132 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 7133 return false; 7134 7135 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 7136 << Arg->getSourceRange(); 7137 } 7138 7139 static bool IsShiftedByte(llvm::APSInt Value) { 7140 if (Value.isNegative()) 7141 return false; 7142 7143 // Check if it's a shifted byte, by shifting it down 7144 while (true) { 7145 // If the value fits in the bottom byte, the check passes. 7146 if (Value < 0x100) 7147 return true; 7148 7149 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7150 // fails. 7151 if ((Value & 0xFF) != 0) 7152 return false; 7153 7154 // If the bottom 8 bits are all 0, but something above that is nonzero, 7155 // then shifting the value right by 8 bits won't affect whether it's a 7156 // shifted byte or not. So do that, and go round again. 7157 Value >>= 8; 7158 } 7159 } 7160 7161 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7162 /// a constant expression representing an arbitrary byte value shifted left by 7163 /// a multiple of 8 bits. 7164 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7165 unsigned ArgBits) { 7166 llvm::APSInt Result; 7167 7168 // We can't check the value of a dependent argument. 7169 Expr *Arg = TheCall->getArg(ArgNum); 7170 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7171 return false; 7172 7173 // Check constant-ness first. 7174 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7175 return true; 7176 7177 // Truncate to the given size. 7178 Result = Result.getLoBits(ArgBits); 7179 Result.setIsUnsigned(true); 7180 7181 if (IsShiftedByte(Result)) 7182 return false; 7183 7184 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7185 << Arg->getSourceRange(); 7186 } 7187 7188 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7189 /// TheCall is a constant expression representing either a shifted byte value, 7190 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7191 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7192 /// Arm MVE intrinsics. 7193 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7194 int ArgNum, 7195 unsigned ArgBits) { 7196 llvm::APSInt Result; 7197 7198 // We can't check the value of a dependent argument. 7199 Expr *Arg = TheCall->getArg(ArgNum); 7200 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7201 return false; 7202 7203 // Check constant-ness first. 7204 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7205 return true; 7206 7207 // Truncate to the given size. 7208 Result = Result.getLoBits(ArgBits); 7209 Result.setIsUnsigned(true); 7210 7211 // Check to see if it's in either of the required forms. 
7212 if (IsShiftedByte(Result) || 7213 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7214 return false; 7215 7216 return Diag(TheCall->getBeginLoc(), 7217 diag::err_argument_not_shifted_byte_or_xxff) 7218 << Arg->getSourceRange(); 7219 } 7220 7221 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7222 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7223 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7224 if (checkArgCount(*this, TheCall, 2)) 7225 return true; 7226 Expr *Arg0 = TheCall->getArg(0); 7227 Expr *Arg1 = TheCall->getArg(1); 7228 7229 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7230 if (FirstArg.isInvalid()) 7231 return true; 7232 QualType FirstArgType = FirstArg.get()->getType(); 7233 if (!FirstArgType->isAnyPointerType()) 7234 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7235 << "first" << FirstArgType << Arg0->getSourceRange(); 7236 TheCall->setArg(0, FirstArg.get()); 7237 7238 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7239 if (SecArg.isInvalid()) 7240 return true; 7241 QualType SecArgType = SecArg.get()->getType(); 7242 if (!SecArgType->isIntegerType()) 7243 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7244 << "second" << SecArgType << Arg1->getSourceRange(); 7245 7246 // Derive the return type from the pointer argument. 7247 TheCall->setType(FirstArgType); 7248 return false; 7249 } 7250 7251 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7252 if (checkArgCount(*this, TheCall, 2)) 7253 return true; 7254 7255 Expr *Arg0 = TheCall->getArg(0); 7256 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7257 if (FirstArg.isInvalid()) 7258 return true; 7259 QualType FirstArgType = FirstArg.get()->getType(); 7260 if (!FirstArgType->isAnyPointerType()) 7261 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7262 << "first" << FirstArgType << Arg0->getSourceRange(); 7263 TheCall->setArg(0, FirstArg.get()); 7264 7265 // Derive the return type from the pointer argument. 
7266 TheCall->setType(FirstArgType); 7267 7268 // Second arg must be an constant in range [0,15] 7269 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7270 } 7271 7272 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7273 if (checkArgCount(*this, TheCall, 2)) 7274 return true; 7275 Expr *Arg0 = TheCall->getArg(0); 7276 Expr *Arg1 = TheCall->getArg(1); 7277 7278 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7279 if (FirstArg.isInvalid()) 7280 return true; 7281 QualType FirstArgType = FirstArg.get()->getType(); 7282 if (!FirstArgType->isAnyPointerType()) 7283 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7284 << "first" << FirstArgType << Arg0->getSourceRange(); 7285 7286 QualType SecArgType = Arg1->getType(); 7287 if (!SecArgType->isIntegerType()) 7288 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7289 << "second" << SecArgType << Arg1->getSourceRange(); 7290 TheCall->setType(Context.IntTy); 7291 return false; 7292 } 7293 7294 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7295 BuiltinID == AArch64::BI__builtin_arm_stg) { 7296 if (checkArgCount(*this, TheCall, 1)) 7297 return true; 7298 Expr *Arg0 = TheCall->getArg(0); 7299 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7300 if (FirstArg.isInvalid()) 7301 return true; 7302 7303 QualType FirstArgType = FirstArg.get()->getType(); 7304 if (!FirstArgType->isAnyPointerType()) 7305 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7306 << "first" << FirstArgType << Arg0->getSourceRange(); 7307 TheCall->setArg(0, FirstArg.get()); 7308 7309 // Derive the return type from the pointer argument. 7310 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 7311 TheCall->setType(FirstArgType); 7312 return false; 7313 } 7314 7315 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 7316 Expr *ArgA = TheCall->getArg(0); 7317 Expr *ArgB = TheCall->getArg(1); 7318 7319 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 7320 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 7321 7322 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 7323 return true; 7324 7325 QualType ArgTypeA = ArgExprA.get()->getType(); 7326 QualType ArgTypeB = ArgExprB.get()->getType(); 7327 7328 auto isNull = [&] (Expr *E) -> bool { 7329 return E->isNullPointerConstant( 7330 Context, Expr::NPC_ValueDependentIsNotNull); }; 7331 7332 // argument should be either a pointer or null 7333 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 7334 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7335 << "first" << ArgTypeA << ArgA->getSourceRange(); 7336 7337 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 7338 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7339 << "second" << ArgTypeB << ArgB->getSourceRange(); 7340 7341 // Ensure Pointee types are compatible 7342 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 7343 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 7344 QualType pointeeA = ArgTypeA->getPointeeType(); 7345 QualType pointeeB = ArgTypeB->getPointeeType(); 7346 if (!Context.typesAreCompatible( 7347 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 7348 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 7349 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 7350 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 7351 << ArgB->getSourceRange(); 7352 } 7353 } 7354 7355 // at least one argument should be pointer type 7356 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 7357 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 7358 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 7359 7360 if (isNull(ArgA)) // adopt type of the other pointer 7361 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 7362 7363 if (isNull(ArgB)) 7364 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 7365 7366 TheCall->setArg(0, ArgExprA.get()); 7367 TheCall->setArg(1, ArgExprB.get()); 7368 TheCall->setType(Context.LongLongTy); 7369 return false; 7370 } 7371 assert(false && "Unhandled ARM MTE intrinsic"); 7372 return true; 7373 } 7374 7375 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 7376 /// TheCall is an ARM/AArch64 special register string literal. 7377 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 7378 int ArgNum, unsigned ExpectedFieldNum, 7379 bool AllowName) { 7380 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 7381 BuiltinID == ARM::BI__builtin_arm_wsr64 || 7382 BuiltinID == ARM::BI__builtin_arm_rsr || 7383 BuiltinID == ARM::BI__builtin_arm_rsrp || 7384 BuiltinID == ARM::BI__builtin_arm_wsr || 7385 BuiltinID == ARM::BI__builtin_arm_wsrp; 7386 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 7387 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 7388 BuiltinID == AArch64::BI__builtin_arm_rsr || 7389 BuiltinID == AArch64::BI__builtin_arm_rsrp || 7390 BuiltinID == AArch64::BI__builtin_arm_wsr || 7391 BuiltinID == AArch64::BI__builtin_arm_wsrp; 7392 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 7393 7394 // We can't check the value of a dependent argument. 7395 Expr *Arg = TheCall->getArg(ArgNum); 7396 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7397 return false; 7398 7399 // Check if the argument is a string literal. 7400 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 7401 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 7402 << Arg->getSourceRange(); 7403 7404 // Check the type of special register given. 7405 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 7406 SmallVector<StringRef, 6> Fields; 7407 Reg.split(Fields, ":"); 7408 7409 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 7410 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7411 << Arg->getSourceRange(); 7412 7413 // If the string is the name of a register then we cannot check that it is 7414 // valid here but if the string is of one the forms described in ACLE then we 7415 // can check that the supplied fields are integers and within the valid 7416 // ranges. 7417 if (Fields.size() > 1) { 7418 bool FiveFields = Fields.size() == 5; 7419 7420 bool ValidString = true; 7421 if (IsARMBuiltin) { 7422 ValidString &= Fields[0].startswith_insensitive("cp") || 7423 Fields[0].startswith_insensitive("p"); 7424 if (ValidString) 7425 Fields[0] = Fields[0].drop_front( 7426 Fields[0].startswith_insensitive("cp") ? 2 : 1); 7427 7428 ValidString &= Fields[2].startswith_insensitive("c"); 7429 if (ValidString) 7430 Fields[2] = Fields[2].drop_front(1); 7431 7432 if (FiveFields) { 7433 ValidString &= Fields[3].startswith_insensitive("c"); 7434 if (ValidString) 7435 Fields[3] = Fields[3].drop_front(1); 7436 } 7437 } 7438 7439 SmallVector<int, 5> Ranges; 7440 if (FiveFields) 7441 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 7442 else 7443 Ranges.append({15, 7, 15}); 7444 7445 for (unsigned i=0; i<Fields.size(); ++i) { 7446 int IntField; 7447 ValidString &= !Fields[i].getAsInteger(10, IntField); 7448 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 7449 } 7450 7451 if (!ValidString) 7452 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7453 << Arg->getSourceRange(); 7454 } else if (IsAArch64Builtin && Fields.size() == 1) { 7455 // If the register name is one of those that appear in the condition below 7456 // and the special register builtin being used is one of the write builtins, 7457 // then we require that the argument provided for writing to the register 7458 // is an integer constant expression. This is because it will be lowered to 7459 // an MSR (immediate) instruction, so we need to know the immediate at 7460 // compile time. 7461 if (TheCall->getNumArgs() != 2) 7462 return false; 7463 7464 std::string RegLower = Reg.lower(); 7465 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7466 RegLower != "pan" && RegLower != "uao") 7467 return false; 7468 7469 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7470 } 7471 7472 return false; 7473 } 7474 7475 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7476 /// Emit an error and return true on failure; return false on success. 7477 /// TypeStr is a string containing the type descriptor of the value returned by 7478 /// the builtin and the descriptors of the expected type of the arguments. 7479 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) { 7480 7481 assert((TypeStr[0] != '\0') && 7482 "Invalid types in PPC MMA builtin declaration"); 7483 7484 unsigned Mask = 0; 7485 unsigned ArgNum = 0; 7486 7487 // The first type in TypeStr is the type of the value returned by the 7488 // builtin. So we first read that type and change the type of TheCall. 7489 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7490 TheCall->setType(type); 7491 7492 while (*TypeStr != '\0') { 7493 Mask = 0; 7494 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7495 if (ArgNum >= TheCall->getNumArgs()) { 7496 ArgNum++; 7497 break; 7498 } 7499 7500 Expr *Arg = TheCall->getArg(ArgNum); 7501 QualType ArgType = Arg->getType(); 7502 7503 if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) || 7504 (!ExpectedType->isVoidPointerType() && 7505 ArgType.getCanonicalType() != ExpectedType)) 7506 return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7507 << ArgType << ExpectedType << 1 << 0 << 0; 7508 7509 // If the value of the Mask is not 0, we have a constraint in the size of 7510 // the integer argument so here we ensure the argument is a constant that 7511 // is in the valid range. 7512 if (Mask != 0 && 7513 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7514 return true; 7515 7516 ArgNum++; 7517 } 7518 7519 // In case we exited early from the previous loop, there are other types to 7520 // read from TypeStr. So we need to read them all to ensure we have the right 7521 // number of arguments in TheCall and if it is not the case, to display a 7522 // better error message. 7523 while (*TypeStr != '\0') { 7524 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7525 ArgNum++; 7526 } 7527 if (checkArgCount(*this, TheCall, ArgNum)) 7528 return true; 7529 7530 return false; 7531 } 7532 7533 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 
7534 /// This checks that the target supports __builtin_longjmp and 7535 /// that val is a constant 1. 7536 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7537 if (!Context.getTargetInfo().hasSjLjLowering()) 7538 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7539 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7540 7541 Expr *Arg = TheCall->getArg(1); 7542 llvm::APSInt Result; 7543 7544 // TODO: This is less than ideal. Overload this to take a value. 7545 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7546 return true; 7547 7548 if (Result != 1) 7549 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7550 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7551 7552 return false; 7553 } 7554 7555 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 7556 /// This checks that the target supports __builtin_setjmp. 7557 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7558 if (!Context.getTargetInfo().hasSjLjLowering()) 7559 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7560 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7561 return false; 7562 } 7563 7564 namespace { 7565 7566 class UncoveredArgHandler { 7567 enum { Unknown = -1, AllCovered = -2 }; 7568 7569 signed FirstUncoveredArg = Unknown; 7570 SmallVector<const Expr *, 4> DiagnosticExprs; 7571 7572 public: 7573 UncoveredArgHandler() = default; 7574 7575 bool hasUncoveredArg() const { 7576 return (FirstUncoveredArg >= 0); 7577 } 7578 7579 unsigned getUncoveredArg() const { 7580 assert(hasUncoveredArg() && "no uncovered argument"); 7581 return FirstUncoveredArg; 7582 } 7583 7584 void setAllCovered() { 7585 // A string has been found with all arguments covered, so clear out 7586 // the diagnostics. 7587 DiagnosticExprs.clear(); 7588 FirstUncoveredArg = AllCovered; 7589 } 7590 7591 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7592 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7593 7594 // Don't update if a previous string covers all arguments. 7595 if (FirstUncoveredArg == AllCovered) 7596 return; 7597 7598 // UncoveredArgHandler tracks the highest uncovered argument index 7599 // and with it all the strings that match this index. 7600 if (NewFirstUncoveredArg == FirstUncoveredArg) 7601 DiagnosticExprs.push_back(StrExpr); 7602 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7603 DiagnosticExprs.clear(); 7604 DiagnosticExprs.push_back(StrExpr); 7605 FirstUncoveredArg = NewFirstUncoveredArg; 7606 } 7607 } 7608 7609 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7610 }; 7611 7612 enum StringLiteralCheckType { 7613 SLCT_NotALiteral, 7614 SLCT_UncheckedLiteral, 7615 SLCT_CheckedLiteral 7616 }; 7617 7618 } // namespace 7619 7620 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7621 BinaryOperatorKind BinOpKind, 7622 bool AddendIsRight) { 7623 unsigned BitWidth = Offset.getBitWidth(); 7624 unsigned AddendBitWidth = Addend.getBitWidth(); 7625 // There might be negative interim results. 7626 if (Addend.isUnsigned()) { 7627 Addend = Addend.zext(++AddendBitWidth); 7628 Addend.setIsSigned(true); 7629 } 7630 // Adjust the bit width of the APSInts. 
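  // APSInt arithmetic below requires both operands to have the same bit
  // width, so sign-extend the narrower one (both values are signed here).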
7631 if (AddendBitWidth > BitWidth) { 7632 Offset = Offset.sext(AddendBitWidth); 7633 BitWidth = AddendBitWidth; 7634 } else if (BitWidth > AddendBitWidth) { 7635 Addend = Addend.sext(BitWidth); 7636 } 7637 7638 bool Ov = false; 7639 llvm::APSInt ResOffset = Offset; 7640 if (BinOpKind == BO_Add) 7641 ResOffset = Offset.sadd_ov(Addend, Ov); 7642 else { 7643 assert(AddendIsRight && BinOpKind == BO_Sub && 7644 "operator must be add or sub with addend on the right"); 7645 ResOffset = Offset.ssub_ov(Addend, Ov); 7646 } 7647 7648 // We add an offset to a pointer here so we should support an offset as big as 7649 // possible. 7650 if (Ov) { 7651 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7652 "index (intermediate) result too big"); 7653 Offset = Offset.sext(2 * BitWidth); 7654 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7655 return; 7656 } 7657 7658 Offset = ResOffset; 7659 } 7660 7661 namespace { 7662 7663 // This is a wrapper class around StringLiteral to support offsetted string 7664 // literals as format strings. It takes the offset into account when returning 7665 // the string and its length or the source locations to display notes correctly. 7666 class FormatStringLiteral { 7667 const StringLiteral *FExpr; 7668 int64_t Offset; 7669 7670 public: 7671 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7672 : FExpr(fexpr), Offset(Offset) {} 7673 7674 StringRef getString() const { 7675 return FExpr->getString().drop_front(Offset); 7676 } 7677 7678 unsigned getByteLength() const { 7679 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7680 } 7681 7682 unsigned getLength() const { return FExpr->getLength() - Offset; } 7683 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7684 7685 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7686 7687 QualType getType() const { return FExpr->getType(); } 7688 7689 bool isAscii() const { return FExpr->isAscii(); } 7690 bool isWide() const { return FExpr->isWide(); } 7691 bool isUTF8() const { return FExpr->isUTF8(); } 7692 bool isUTF16() const { return FExpr->isUTF16(); } 7693 bool isUTF32() const { return FExpr->isUTF32(); } 7694 bool isPascal() const { return FExpr->isPascal(); } 7695 7696 SourceLocation getLocationOfByte( 7697 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7698 const TargetInfo &Target, unsigned *StartToken = nullptr, 7699 unsigned *StartTokenByteOffset = nullptr) const { 7700 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7701 StartToken, StartTokenByteOffset); 7702 } 7703 7704 SourceLocation getBeginLoc() const LLVM_READONLY { 7705 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7706 } 7707 7708 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7709 }; 7710 7711 } // namespace 7712 7713 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 7714 const Expr *OrigFormatExpr, 7715 ArrayRef<const Expr *> Args, 7716 bool HasVAListArg, unsigned format_idx, 7717 unsigned firstDataArg, 7718 Sema::FormatStringType Type, 7719 bool inFunctionCall, 7720 Sema::VariadicCallType CallType, 7721 llvm::SmallBitVector &CheckedVarArgs, 7722 UncoveredArgHandler &UncoveredArg, 7723 bool IgnoreStringsWithoutSpecifiers); 7724 7725 // Determine if an expression is a string literal or constant string. 7726 // If this function returns false on the arguments to a function expecting a 7727 // format string, we will usually need to emit a warning. 
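// For instance, a format string assembled at run time is not a literal and
// will typically be flagged below by -Wformat-nonliteral or -Wformat-security.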
7728 // True string literals are then checked by CheckFormatString. 7729 static StringLiteralCheckType 7730 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 7731 bool HasVAListArg, unsigned format_idx, 7732 unsigned firstDataArg, Sema::FormatStringType Type, 7733 Sema::VariadicCallType CallType, bool InFunctionCall, 7734 llvm::SmallBitVector &CheckedVarArgs, 7735 UncoveredArgHandler &UncoveredArg, 7736 llvm::APSInt Offset, 7737 bool IgnoreStringsWithoutSpecifiers = false) { 7738 if (S.isConstantEvaluated()) 7739 return SLCT_NotALiteral; 7740 tryAgain: 7741 assert(Offset.isSigned() && "invalid offset"); 7742 7743 if (E->isTypeDependent() || E->isValueDependent()) 7744 return SLCT_NotALiteral; 7745 7746 E = E->IgnoreParenCasts(); 7747 7748 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 7749 // Technically -Wformat-nonliteral does not warn about this case. 7750 // The behavior of printf and friends in this case is implementation 7751 // dependent. Ideally if the format string cannot be null then 7752 // it should have a 'nonnull' attribute in the function prototype. 7753 return SLCT_UncheckedLiteral; 7754 7755 switch (E->getStmtClass()) { 7756 case Stmt::BinaryConditionalOperatorClass: 7757 case Stmt::ConditionalOperatorClass: { 7758 // The expression is a literal if both sub-expressions were, and it was 7759 // completely checked only if both sub-expressions were checked. 7760 const AbstractConditionalOperator *C = 7761 cast<AbstractConditionalOperator>(E); 7762 7763 // Determine whether it is necessary to check both sub-expressions, for 7764 // example, because the condition expression is a constant that can be 7765 // evaluated at compile time. 7766 bool CheckLeft = true, CheckRight = true; 7767 7768 bool Cond; 7769 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 7770 S.isConstantEvaluated())) { 7771 if (Cond) 7772 CheckRight = false; 7773 else 7774 CheckLeft = false; 7775 } 7776 7777 // We need to maintain the offsets for the right and the left hand side 7778 // separately to check if every possible indexed expression is a valid 7779 // string literal. They might have different offsets for different string 7780 // literals in the end. 7781 StringLiteralCheckType Left; 7782 if (!CheckLeft) 7783 Left = SLCT_UncheckedLiteral; 7784 else { 7785 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 7786 HasVAListArg, format_idx, firstDataArg, 7787 Type, CallType, InFunctionCall, 7788 CheckedVarArgs, UncoveredArg, Offset, 7789 IgnoreStringsWithoutSpecifiers); 7790 if (Left == SLCT_NotALiteral || !CheckRight) { 7791 return Left; 7792 } 7793 } 7794 7795 StringLiteralCheckType Right = checkFormatStringExpr( 7796 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 7797 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7798 IgnoreStringsWithoutSpecifiers); 7799 7800 return (CheckLeft && Left < Right) ? Left : Right; 7801 } 7802 7803 case Stmt::ImplicitCastExprClass: 7804 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 7805 goto tryAgain; 7806 7807 case Stmt::OpaqueValueExprClass: 7808 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 7809 E = src; 7810 goto tryAgain; 7811 } 7812 return SLCT_NotALiteral; 7813 7814 case Stmt::PredefinedExprClass: 7815 // While __func__, etc., are technically not string literals, they 7816 // cannot contain format specifiers and thus are not a security 7817 // liability. 
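    // For example, printf(__func__) is accepted without a
    // -Wformat-nonliteral warning.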
7818 return SLCT_UncheckedLiteral; 7819 7820 case Stmt::DeclRefExprClass: { 7821 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7822 7823 // As an exception, do not flag errors for variables binding to 7824 // const string literals. 7825 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7826 bool isConstant = false; 7827 QualType T = DR->getType(); 7828 7829 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7830 isConstant = AT->getElementType().isConstant(S.Context); 7831 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7832 isConstant = T.isConstant(S.Context) && 7833 PT->getPointeeType().isConstant(S.Context); 7834 } else if (T->isObjCObjectPointerType()) { 7835 // In ObjC, there is usually no "const ObjectPointer" type, 7836 // so don't check if the pointee type is constant. 7837 isConstant = T.isConstant(S.Context); 7838 } 7839 7840 if (isConstant) { 7841 if (const Expr *Init = VD->getAnyInitializer()) { 7842 // Look through initializers like const char c[] = { "foo" } 7843 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7844 if (InitList->isStringLiteralInit()) 7845 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7846 } 7847 return checkFormatStringExpr(S, Init, Args, 7848 HasVAListArg, format_idx, 7849 firstDataArg, Type, CallType, 7850 /*InFunctionCall*/ false, CheckedVarArgs, 7851 UncoveredArg, Offset); 7852 } 7853 } 7854 7855 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7856 // special check to see if the format string is a function parameter 7857 // of the function calling the printf function. If the function 7858 // has an attribute indicating it is a printf-like function, then we 7859 // should suppress warnings concerning non-literals being used in a call 7860 // to a vprintf function. For example: 7861 // 7862 // void 7863 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7864 // va_list ap; 7865 // va_start(ap, fmt); 7866 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7867 // ... 7868 // } 7869 if (HasVAListArg) { 7870 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7871 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 7872 int PVIndex = PV->getFunctionScopeIndex() + 1; 7873 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 7874 // adjust for implicit parameter 7875 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 7876 if (MD->isInstance()) 7877 ++PVIndex; 7878 // We also check if the formats are compatible. 7879 // We can't pass a 'scanf' string to a 'printf' function. 
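            // For example, a parameter attributed with format(printf, 1, 2)
            // suppresses the warning for a vprintf call but not for a
            // vscanf call.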
7880 if (PVIndex == PVFormat->getFormatIdx() && 7881 Type == S.GetFormatStringType(PVFormat)) 7882 return SLCT_UncheckedLiteral; 7883 } 7884 } 7885 } 7886 } 7887 } 7888 7889 return SLCT_NotALiteral; 7890 } 7891 7892 case Stmt::CallExprClass: 7893 case Stmt::CXXMemberCallExprClass: { 7894 const CallExpr *CE = cast<CallExpr>(E); 7895 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 7896 bool IsFirst = true; 7897 StringLiteralCheckType CommonResult; 7898 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 7899 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 7900 StringLiteralCheckType Result = checkFormatStringExpr( 7901 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7902 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7903 IgnoreStringsWithoutSpecifiers); 7904 if (IsFirst) { 7905 CommonResult = Result; 7906 IsFirst = false; 7907 } 7908 } 7909 if (!IsFirst) 7910 return CommonResult; 7911 7912 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 7913 unsigned BuiltinID = FD->getBuiltinID(); 7914 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 7915 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 7916 const Expr *Arg = CE->getArg(0); 7917 return checkFormatStringExpr(S, Arg, Args, 7918 HasVAListArg, format_idx, 7919 firstDataArg, Type, CallType, 7920 InFunctionCall, CheckedVarArgs, 7921 UncoveredArg, Offset, 7922 IgnoreStringsWithoutSpecifiers); 7923 } 7924 } 7925 } 7926 7927 return SLCT_NotALiteral; 7928 } 7929 case Stmt::ObjCMessageExprClass: { 7930 const auto *ME = cast<ObjCMessageExpr>(E); 7931 if (const auto *MD = ME->getMethodDecl()) { 7932 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 7933 // As a special case heuristic, if we're using the method -[NSBundle 7934 // localizedStringForKey:value:table:], ignore any key strings that lack 7935 // format specifiers. The idea is that if the key doesn't have any 7936 // format specifiers then its probably just a key to map to the 7937 // localized strings. If it does have format specifiers though, then its 7938 // likely that the text of the key is the format string in the 7939 // programmer's language, and should be checked. 7940 const ObjCInterfaceDecl *IFace; 7941 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 7942 IFace->getIdentifier()->isStr("NSBundle") && 7943 MD->getSelector().isKeywordSelector( 7944 {"localizedStringForKey", "value", "table"})) { 7945 IgnoreStringsWithoutSpecifiers = true; 7946 } 7947 7948 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 7949 return checkFormatStringExpr( 7950 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7951 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7952 IgnoreStringsWithoutSpecifiers); 7953 } 7954 } 7955 7956 return SLCT_NotALiteral; 7957 } 7958 case Stmt::ObjCStringLiteralClass: 7959 case Stmt::StringLiteralClass: { 7960 const StringLiteral *StrE = nullptr; 7961 7962 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 7963 StrE = ObjCFExpr->getString(); 7964 else 7965 StrE = cast<StringLiteral>(E); 7966 7967 if (StrE) { 7968 if (Offset.isNegative() || Offset > StrE->getLength()) { 7969 // TODO: It would be better to have an explicit warning for out of 7970 // bounds literals. 
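        // For example, the offset in printf("%s" + 10, p) points past the end
        // of the literal, so it is treated as a non-literal for now.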
7971 return SLCT_NotALiteral; 7972 } 7973 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 7974 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 7975 firstDataArg, Type, InFunctionCall, CallType, 7976 CheckedVarArgs, UncoveredArg, 7977 IgnoreStringsWithoutSpecifiers); 7978 return SLCT_CheckedLiteral; 7979 } 7980 7981 return SLCT_NotALiteral; 7982 } 7983 case Stmt::BinaryOperatorClass: { 7984 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 7985 7986 // A string literal + an int offset is still a string literal. 7987 if (BinOp->isAdditiveOp()) { 7988 Expr::EvalResult LResult, RResult; 7989 7990 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7991 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7992 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7993 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7994 7995 if (LIsInt != RIsInt) { 7996 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7997 7998 if (LIsInt) { 7999 if (BinOpKind == BO_Add) { 8000 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 8001 E = BinOp->getRHS(); 8002 goto tryAgain; 8003 } 8004 } else { 8005 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 8006 E = BinOp->getLHS(); 8007 goto tryAgain; 8008 } 8009 } 8010 } 8011 8012 return SLCT_NotALiteral; 8013 } 8014 case Stmt::UnaryOperatorClass: { 8015 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 8016 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 8017 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 8018 Expr::EvalResult IndexResult; 8019 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 8020 Expr::SE_NoSideEffects, 8021 S.isConstantEvaluated())) { 8022 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 8023 /*RHS is int*/ true); 8024 E = ASE->getBase(); 8025 goto tryAgain; 8026 } 8027 } 8028 8029 return SLCT_NotALiteral; 8030 } 8031 8032 default: 8033 return SLCT_NotALiteral; 8034 } 8035 } 8036 8037 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 8038 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 8039 .Case("scanf", FST_Scanf) 8040 .Cases("printf", "printf0", FST_Printf) 8041 .Cases("NSString", "CFString", FST_NSString) 8042 .Case("strftime", FST_Strftime) 8043 .Case("strfmon", FST_Strfmon) 8044 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 8045 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 8046 .Case("os_trace", FST_OSLog) 8047 .Case("os_log", FST_OSLog) 8048 .Default(FST_Unknown); 8049 } 8050 8051 /// CheckFormatArguments - Check calls to printf and scanf (and similar 8052 /// functions) for correct use of format strings. 8053 /// Returns true if a format string has been fully checked. 
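/// For example, a call checked against a string literal (printf("%d", i))
/// returns true, while a call whose format argument is not a literal returns
/// false.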
8054 bool Sema::CheckFormatArguments(const FormatAttr *Format, 8055 ArrayRef<const Expr *> Args, 8056 bool IsCXXMember, 8057 VariadicCallType CallType, 8058 SourceLocation Loc, SourceRange Range, 8059 llvm::SmallBitVector &CheckedVarArgs) { 8060 FormatStringInfo FSI; 8061 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 8062 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 8063 FSI.FirstDataArg, GetFormatStringType(Format), 8064 CallType, Loc, Range, CheckedVarArgs); 8065 return false; 8066 } 8067 8068 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 8069 bool HasVAListArg, unsigned format_idx, 8070 unsigned firstDataArg, FormatStringType Type, 8071 VariadicCallType CallType, 8072 SourceLocation Loc, SourceRange Range, 8073 llvm::SmallBitVector &CheckedVarArgs) { 8074 // CHECK: printf/scanf-like function is called with no format string. 8075 if (format_idx >= Args.size()) { 8076 Diag(Loc, diag::warn_missing_format_string) << Range; 8077 return false; 8078 } 8079 8080 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 8081 8082 // CHECK: format string is not a string literal. 8083 // 8084 // Dynamically generated format strings are difficult to 8085 // automatically vet at compile time. Requiring that format strings 8086 // are string literals: (1) permits the checking of format strings by 8087 // the compiler and thereby (2) can practically remove the source of 8088 // many format string exploits. 8089 8090 // Format string can be either ObjC string (e.g. @"%d") or 8091 // C string (e.g. "%d") 8092 // ObjC string uses the same format specifiers as C string, so we can use 8093 // the same format string checking logic for both ObjC and C strings. 8094 UncoveredArgHandler UncoveredArg; 8095 StringLiteralCheckType CT = 8096 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 8097 format_idx, firstDataArg, Type, CallType, 8098 /*IsFunctionCall*/ true, CheckedVarArgs, 8099 UncoveredArg, 8100 /*no string offset*/ llvm::APSInt(64, false) = 0); 8101 8102 // Generate a diagnostic where an uncovered argument is detected. 8103 if (UncoveredArg.hasUncoveredArg()) { 8104 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 8105 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 8106 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 8107 } 8108 8109 if (CT != SLCT_NotALiteral) 8110 // Literal format string found, check done! 8111 return CT == SLCT_CheckedLiteral; 8112 8113 // Strftime is particular as it always uses a single 'time' argument, 8114 // so it is safe to pass a non-literal string. 8115 if (Type == FST_Strftime) 8116 return false; 8117 8118 // Do not emit diag when the string param is a macro expansion and the 8119 // format is either NSString or CFString. This is a hack to prevent 8120 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 8121 // which are usually used in place of NS and CF string literals. 8122 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 8123 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 8124 return false; 8125 8126 // If there are no arguments specified, warn with -Wformat-security, otherwise 8127 // warn only with -Wformat-nonliteral. 
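  // For example, printf(s) is diagnosed under -Wformat-security (with a
  // fix-it suggesting printf("%s", s)), whereas printf(s, x) is diagnosed
  // under -Wformat-nonliteral.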
8128 if (Args.size() == firstDataArg) { 8129 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 8130 << OrigFormatExpr->getSourceRange(); 8131 switch (Type) { 8132 default: 8133 break; 8134 case FST_Kprintf: 8135 case FST_FreeBSDKPrintf: 8136 case FST_Printf: 8137 Diag(FormatLoc, diag::note_format_security_fixit) 8138 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 8139 break; 8140 case FST_NSString: 8141 Diag(FormatLoc, diag::note_format_security_fixit) 8142 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 8143 break; 8144 } 8145 } else { 8146 Diag(FormatLoc, diag::warn_format_nonliteral) 8147 << OrigFormatExpr->getSourceRange(); 8148 } 8149 return false; 8150 } 8151 8152 namespace { 8153 8154 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8155 protected: 8156 Sema &S; 8157 const FormatStringLiteral *FExpr; 8158 const Expr *OrigFormatExpr; 8159 const Sema::FormatStringType FSType; 8160 const unsigned FirstDataArg; 8161 const unsigned NumDataArgs; 8162 const char *Beg; // Start of format string. 8163 const bool HasVAListArg; 8164 ArrayRef<const Expr *> Args; 8165 unsigned FormatIdx; 8166 llvm::SmallBitVector CoveredArgs; 8167 bool usesPositionalArgs = false; 8168 bool atFirstArg = true; 8169 bool inFunctionCall; 8170 Sema::VariadicCallType CallType; 8171 llvm::SmallBitVector &CheckedVarArgs; 8172 UncoveredArgHandler &UncoveredArg; 8173 8174 public: 8175 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8176 const Expr *origFormatExpr, 8177 const Sema::FormatStringType type, unsigned firstDataArg, 8178 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8179 ArrayRef<const Expr *> Args, unsigned formatIdx, 8180 bool inFunctionCall, Sema::VariadicCallType callType, 8181 llvm::SmallBitVector &CheckedVarArgs, 8182 UncoveredArgHandler &UncoveredArg) 8183 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8184 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8185 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8186 inFunctionCall(inFunctionCall), CallType(callType), 8187 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8188 CoveredArgs.resize(numDataArgs); 8189 CoveredArgs.reset(); 8190 } 8191 8192 void DoneProcessing(); 8193 8194 void HandleIncompleteSpecifier(const char *startSpecifier, 8195 unsigned specifierLen) override; 8196 8197 void HandleInvalidLengthModifier( 8198 const analyze_format_string::FormatSpecifier &FS, 8199 const analyze_format_string::ConversionSpecifier &CS, 8200 const char *startSpecifier, unsigned specifierLen, 8201 unsigned DiagID); 8202 8203 void HandleNonStandardLengthModifier( 8204 const analyze_format_string::FormatSpecifier &FS, 8205 const char *startSpecifier, unsigned specifierLen); 8206 8207 void HandleNonStandardConversionSpecifier( 8208 const analyze_format_string::ConversionSpecifier &CS, 8209 const char *startSpecifier, unsigned specifierLen); 8210 8211 void HandlePosition(const char *startPos, unsigned posLen) override; 8212 8213 void HandleInvalidPosition(const char *startSpecifier, 8214 unsigned specifierLen, 8215 analyze_format_string::PositionContext p) override; 8216 8217 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8218 8219 void HandleNullChar(const char *nullCharacter) override; 8220 8221 template <typename Range> 8222 static void 8223 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8224 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8225 bool IsStringLocation, 
Range StringRange, 8226 ArrayRef<FixItHint> Fixit = None); 8227 8228 protected: 8229 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8230 const char *startSpec, 8231 unsigned specifierLen, 8232 const char *csStart, unsigned csLen); 8233 8234 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8235 const char *startSpec, 8236 unsigned specifierLen); 8237 8238 SourceRange getFormatStringRange(); 8239 CharSourceRange getSpecifierRange(const char *startSpecifier, 8240 unsigned specifierLen); 8241 SourceLocation getLocationOfByte(const char *x); 8242 8243 const Expr *getDataArg(unsigned i) const; 8244 8245 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8246 const analyze_format_string::ConversionSpecifier &CS, 8247 const char *startSpecifier, unsigned specifierLen, 8248 unsigned argIndex); 8249 8250 template <typename Range> 8251 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8252 bool IsStringLocation, Range StringRange, 8253 ArrayRef<FixItHint> Fixit = None); 8254 }; 8255 8256 } // namespace 8257 8258 SourceRange CheckFormatHandler::getFormatStringRange() { 8259 return OrigFormatExpr->getSourceRange(); 8260 } 8261 8262 CharSourceRange CheckFormatHandler:: 8263 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8264 SourceLocation Start = getLocationOfByte(startSpecifier); 8265 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8266 8267 // Advance the end SourceLocation by one due to half-open ranges. 8268 End = End.getLocWithOffset(1); 8269 8270 return CharSourceRange::getCharRange(Start, End); 8271 } 8272 8273 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8274 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8275 S.getLangOpts(), S.Context.getTargetInfo()); 8276 } 8277 8278 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8279 unsigned specifierLen){ 8280 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8281 getLocationOfByte(startSpecifier), 8282 /*IsStringLocation*/true, 8283 getSpecifierRange(startSpecifier, specifierLen)); 8284 } 8285 8286 void CheckFormatHandler::HandleInvalidLengthModifier( 8287 const analyze_format_string::FormatSpecifier &FS, 8288 const analyze_format_string::ConversionSpecifier &CS, 8289 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8290 using namespace analyze_format_string; 8291 8292 const LengthModifier &LM = FS.getLengthModifier(); 8293 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8294 8295 // See if we know how to fix this length modifier. 
8296 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8297 if (FixedLM) { 8298 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8299 getLocationOfByte(LM.getStart()), 8300 /*IsStringLocation*/true, 8301 getSpecifierRange(startSpecifier, specifierLen)); 8302 8303 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8304 << FixedLM->toString() 8305 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8306 8307 } else { 8308 FixItHint Hint; 8309 if (DiagID == diag::warn_format_nonsensical_length) 8310 Hint = FixItHint::CreateRemoval(LMRange); 8311 8312 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8313 getLocationOfByte(LM.getStart()), 8314 /*IsStringLocation*/true, 8315 getSpecifierRange(startSpecifier, specifierLen), 8316 Hint); 8317 } 8318 } 8319 8320 void CheckFormatHandler::HandleNonStandardLengthModifier( 8321 const analyze_format_string::FormatSpecifier &FS, 8322 const char *startSpecifier, unsigned specifierLen) { 8323 using namespace analyze_format_string; 8324 8325 const LengthModifier &LM = FS.getLengthModifier(); 8326 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8327 8328 // See if we know how to fix this length modifier. 8329 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8330 if (FixedLM) { 8331 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8332 << LM.toString() << 0, 8333 getLocationOfByte(LM.getStart()), 8334 /*IsStringLocation*/true, 8335 getSpecifierRange(startSpecifier, specifierLen)); 8336 8337 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8338 << FixedLM->toString() 8339 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8340 8341 } else { 8342 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8343 << LM.toString() << 0, 8344 getLocationOfByte(LM.getStart()), 8345 /*IsStringLocation*/true, 8346 getSpecifierRange(startSpecifier, specifierLen)); 8347 } 8348 } 8349 8350 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 8351 const analyze_format_string::ConversionSpecifier &CS, 8352 const char *startSpecifier, unsigned specifierLen) { 8353 using namespace analyze_format_string; 8354 8355 // See if we know how to fix this conversion specifier. 
8356 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 8357 if (FixedCS) { 8358 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8359 << CS.toString() << /*conversion specifier*/1, 8360 getLocationOfByte(CS.getStart()), 8361 /*IsStringLocation*/true, 8362 getSpecifierRange(startSpecifier, specifierLen)); 8363 8364 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 8365 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 8366 << FixedCS->toString() 8367 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 8368 } else { 8369 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8370 << CS.toString() << /*conversion specifier*/1, 8371 getLocationOfByte(CS.getStart()), 8372 /*IsStringLocation*/true, 8373 getSpecifierRange(startSpecifier, specifierLen)); 8374 } 8375 } 8376 8377 void CheckFormatHandler::HandlePosition(const char *startPos, 8378 unsigned posLen) { 8379 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 8380 getLocationOfByte(startPos), 8381 /*IsStringLocation*/true, 8382 getSpecifierRange(startPos, posLen)); 8383 } 8384 8385 void 8386 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 8387 analyze_format_string::PositionContext p) { 8388 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 8389 << (unsigned) p, 8390 getLocationOfByte(startPos), /*IsStringLocation*/true, 8391 getSpecifierRange(startPos, posLen)); 8392 } 8393 8394 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 8395 unsigned posLen) { 8396 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 8397 getLocationOfByte(startPos), 8398 /*IsStringLocation*/true, 8399 getSpecifierRange(startPos, posLen)); 8400 } 8401 8402 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 8403 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 8404 // The presence of a null character is likely an error. 8405 EmitFormatDiagnostic( 8406 S.PDiag(diag::warn_printf_format_string_contains_null_char), 8407 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 8408 getFormatStringRange()); 8409 } 8410 } 8411 8412 // Note that this may return NULL if there was an error parsing or building 8413 // one of the argument expressions. 8414 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 8415 return Args[FirstDataArg + i]; 8416 } 8417 8418 void CheckFormatHandler::DoneProcessing() { 8419 // Does the number of data arguments exceed the number of 8420 // format conversions in the format string? 8421 if (!HasVAListArg) { 8422 // Find any arguments that weren't covered. 
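    // CoveredArgs has one bit per data argument; flipping it makes
    // find_first() return the lowest-numbered argument that no conversion
    // specifier consumed, or -1 if every argument was used.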
8423 CoveredArgs.flip(); 8424 signed notCoveredArg = CoveredArgs.find_first(); 8425 if (notCoveredArg >= 0) { 8426 assert((unsigned)notCoveredArg < NumDataArgs); 8427 UncoveredArg.Update(notCoveredArg, OrigFormatExpr); 8428 } else { 8429 UncoveredArg.setAllCovered(); 8430 } 8431 } 8432 } 8433 8434 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, 8435 const Expr *ArgExpr) { 8436 assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && 8437 "Invalid state"); 8438 8439 if (!ArgExpr) 8440 return; 8441 8442 SourceLocation Loc = ArgExpr->getBeginLoc(); 8443 8444 if (S.getSourceManager().isInSystemMacro(Loc)) 8445 return; 8446 8447 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used); 8448 for (auto E : DiagnosticExprs) 8449 PDiag << E->getSourceRange(); 8450 8451 CheckFormatHandler::EmitFormatDiagnostic( 8452 S, IsFunctionCall, DiagnosticExprs[0], 8453 PDiag, Loc, /*IsStringLocation*/false, 8454 DiagnosticExprs[0]->getSourceRange()); 8455 } 8456 8457 bool 8458 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, 8459 SourceLocation Loc, 8460 const char *startSpec, 8461 unsigned specifierLen, 8462 const char *csStart, 8463 unsigned csLen) { 8464 bool keepGoing = true; 8465 if (argIndex < NumDataArgs) { 8466 // Consider the argument covered, even though the specifier doesn't 8467 // make sense. 8468 CoveredArgs.set(argIndex); 8469 } 8470 else { 8471 // If argIndex exceeds the number of data arguments we 8472 // don't issue a warning because that is just a cascade of warnings (and 8473 // they may have intended '%%' anyway). We don't want to continue processing 8474 // the format string after this point, however, as we will likely just get 8475 // gibberish when trying to match arguments. 8476 keepGoing = false; 8477 } 8478 8479 StringRef Specifier(csStart, csLen); 8480 8481 // If the specifier is non-printable, it could be the first byte of a UTF-8 8482 // sequence. In that case, print the UTF-8 code point. If not, print the byte 8483 // hex value.
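  // For example, a stray 0x90 byte is rendered as "\x90", while a correctly
  // encoded U+20AC is rendered as "\u20ac".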
8484 std::string CodePointStr; 8485 if (!llvm::sys::locale::isPrint(*csStart)) { 8486 llvm::UTF32 CodePoint; 8487 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8488 const llvm::UTF8 *E = 8489 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8490 llvm::ConversionResult Result = 8491 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8492 8493 if (Result != llvm::conversionOK) { 8494 unsigned char FirstChar = *csStart; 8495 CodePoint = (llvm::UTF32)FirstChar; 8496 } 8497 8498 llvm::raw_string_ostream OS(CodePointStr); 8499 if (CodePoint < 256) 8500 OS << "\\x" << llvm::format("%02x", CodePoint); 8501 else if (CodePoint <= 0xFFFF) 8502 OS << "\\u" << llvm::format("%04x", CodePoint); 8503 else 8504 OS << "\\U" << llvm::format("%08x", CodePoint); 8505 OS.flush(); 8506 Specifier = CodePointStr; 8507 } 8508 8509 EmitFormatDiagnostic( 8510 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8511 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8512 8513 return keepGoing; 8514 } 8515 8516 void 8517 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8518 const char *startSpec, 8519 unsigned specifierLen) { 8520 EmitFormatDiagnostic( 8521 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8522 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8523 } 8524 8525 bool 8526 CheckFormatHandler::CheckNumArgs( 8527 const analyze_format_string::FormatSpecifier &FS, 8528 const analyze_format_string::ConversionSpecifier &CS, 8529 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8530 8531 if (argIndex >= NumDataArgs) { 8532 PartialDiagnostic PDiag = FS.usesPositionalArg() 8533 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8534 << (argIndex+1) << NumDataArgs) 8535 : S.PDiag(diag::warn_printf_insufficient_data_args); 8536 EmitFormatDiagnostic( 8537 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8538 getSpecifierRange(startSpecifier, specifierLen)); 8539 8540 // Since more arguments than conversion tokens are given, by extension 8541 // all arguments are covered, so mark this as so. 8542 UncoveredArg.setAllCovered(); 8543 return false; 8544 } 8545 return true; 8546 } 8547 8548 template<typename Range> 8549 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8550 SourceLocation Loc, 8551 bool IsStringLocation, 8552 Range StringRange, 8553 ArrayRef<FixItHint> FixIt) { 8554 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8555 Loc, IsStringLocation, StringRange, FixIt); 8556 } 8557 8558 /// If the format string is not within the function call, emit a note 8559 /// so that the function call and string are in diagnostic messages. 8560 /// 8561 /// \param InFunctionCall if true, the format string is within the function 8562 /// call and only one diagnostic message will be produced. Otherwise, an 8563 /// extra note will be emitted pointing to location of the format string. 8564 /// 8565 /// \param ArgumentExpr the expression that is passed as the format string 8566 /// argument in the function call. Used for getting locations when two 8567 /// diagnostics are emitted. 8568 /// 8569 /// \param PDiag the callee should already have provided any strings for the 8570 /// diagnostic message. This function only adds locations and fixits 8571 /// to diagnostics. 8572 /// 8573 /// \param Loc primary location for diagnostic. 
If two diagnostics are 8574 /// required, one will be at Loc and a new SourceLocation will be created for 8575 /// the other one. 8576 /// 8577 /// \param IsStringLocation if true, Loc points to the format string should be 8578 /// used for the note. Otherwise, Loc points to the argument list and will 8579 /// be used with PDiag. 8580 /// 8581 /// \param StringRange some or all of the string to highlight. This is 8582 /// templated so it can accept either a CharSourceRange or a SourceRange. 8583 /// 8584 /// \param FixIt optional fix it hint for the format string. 8585 template <typename Range> 8586 void CheckFormatHandler::EmitFormatDiagnostic( 8587 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 8588 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 8589 Range StringRange, ArrayRef<FixItHint> FixIt) { 8590 if (InFunctionCall) { 8591 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 8592 D << StringRange; 8593 D << FixIt; 8594 } else { 8595 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 8596 << ArgumentExpr->getSourceRange(); 8597 8598 const Sema::SemaDiagnosticBuilder &Note = 8599 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), 8600 diag::note_format_string_defined); 8601 8602 Note << StringRange; 8603 Note << FixIt; 8604 } 8605 } 8606 8607 //===--- CHECK: Printf format string checking ------------------------------===// 8608 8609 namespace { 8610 8611 class CheckPrintfHandler : public CheckFormatHandler { 8612 public: 8613 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 8614 const Expr *origFormatExpr, 8615 const Sema::FormatStringType type, unsigned firstDataArg, 8616 unsigned numDataArgs, bool isObjC, const char *beg, 8617 bool hasVAListArg, ArrayRef<const Expr *> Args, 8618 unsigned formatIdx, bool inFunctionCall, 8619 Sema::VariadicCallType CallType, 8620 llvm::SmallBitVector &CheckedVarArgs, 8621 UncoveredArgHandler &UncoveredArg) 8622 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 8623 numDataArgs, beg, hasVAListArg, Args, formatIdx, 8624 inFunctionCall, CallType, CheckedVarArgs, 8625 UncoveredArg) {} 8626 8627 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 8628 8629 /// Returns true if '%@' specifiers are allowed in the format string. 
8630 bool allowsObjCArg() const { 8631 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8632 FSType == Sema::FST_OSTrace; 8633 } 8634 8635 bool HandleInvalidPrintfConversionSpecifier( 8636 const analyze_printf::PrintfSpecifier &FS, 8637 const char *startSpecifier, 8638 unsigned specifierLen) override; 8639 8640 void handleInvalidMaskType(StringRef MaskType) override; 8641 8642 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8643 const char *startSpecifier, 8644 unsigned specifierLen) override; 8645 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8646 const char *StartSpecifier, 8647 unsigned SpecifierLen, 8648 const Expr *E); 8649 8650 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8651 const char *startSpecifier, unsigned specifierLen); 8652 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8653 const analyze_printf::OptionalAmount &Amt, 8654 unsigned type, 8655 const char *startSpecifier, unsigned specifierLen); 8656 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8657 const analyze_printf::OptionalFlag &flag, 8658 const char *startSpecifier, unsigned specifierLen); 8659 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8660 const analyze_printf::OptionalFlag &ignoredFlag, 8661 const analyze_printf::OptionalFlag &flag, 8662 const char *startSpecifier, unsigned specifierLen); 8663 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8664 const Expr *E); 8665 8666 void HandleEmptyObjCModifierFlag(const char *startFlag, 8667 unsigned flagLen) override; 8668 8669 void HandleInvalidObjCModifierFlag(const char *startFlag, 8670 unsigned flagLen) override; 8671 8672 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8673 const char *flagsEnd, 8674 const char *conversionPosition) 8675 override; 8676 }; 8677 8678 } // namespace 8679 8680 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8681 const analyze_printf::PrintfSpecifier &FS, 8682 const char *startSpecifier, 8683 unsigned specifierLen) { 8684 const analyze_printf::PrintfConversionSpecifier &CS = 8685 FS.getConversionSpecifier(); 8686 8687 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8688 getLocationOfByte(CS.getStart()), 8689 startSpecifier, specifierLen, 8690 CS.getStart(), CS.getLength()); 8691 } 8692 8693 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8694 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8695 } 8696 8697 bool CheckPrintfHandler::HandleAmount( 8698 const analyze_format_string::OptionalAmount &Amt, 8699 unsigned k, const char *startSpecifier, 8700 unsigned specifierLen) { 8701 if (Amt.hasDataArgument()) { 8702 if (!HasVAListArg) { 8703 unsigned argIndex = Amt.getArgIndex(); 8704 if (argIndex >= NumDataArgs) { 8705 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8706 << k, 8707 getLocationOfByte(Amt.getStart()), 8708 /*IsStringLocation*/true, 8709 getSpecifierRange(startSpecifier, specifierLen)); 8710 // Don't do any more checking. We will just emit 8711 // spurious errors. 8712 return false; 8713 } 8714 8715 // Type check the data argument. It should be an 'int'. 8716 // Although not in conformance with C99, we also allow the argument to be 8717 // an 'unsigned int' as that is a reasonably safe case. GCC also 8718 // doesn't emit a warning for that case. 
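        // For example, in printf("%*d", width, value) the 'width' argument is
        // the data argument type-checked here.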
8719 CoveredArgs.set(argIndex); 8720 const Expr *Arg = getDataArg(argIndex); 8721 if (!Arg) 8722 return false; 8723 8724 QualType T = Arg->getType(); 8725 8726 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 8727 assert(AT.isValid()); 8728 8729 if (!AT.matchesType(S.Context, T)) { 8730 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 8731 << k << AT.getRepresentativeTypeName(S.Context) 8732 << T << Arg->getSourceRange(), 8733 getLocationOfByte(Amt.getStart()), 8734 /*IsStringLocation*/true, 8735 getSpecifierRange(startSpecifier, specifierLen)); 8736 // Don't do any more checking. We will just emit 8737 // spurious errors. 8738 return false; 8739 } 8740 } 8741 } 8742 return true; 8743 } 8744 8745 void CheckPrintfHandler::HandleInvalidAmount( 8746 const analyze_printf::PrintfSpecifier &FS, 8747 const analyze_printf::OptionalAmount &Amt, 8748 unsigned type, 8749 const char *startSpecifier, 8750 unsigned specifierLen) { 8751 const analyze_printf::PrintfConversionSpecifier &CS = 8752 FS.getConversionSpecifier(); 8753 8754 FixItHint fixit = 8755 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 8756 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 8757 Amt.getConstantLength())) 8758 : FixItHint(); 8759 8760 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 8761 << type << CS.toString(), 8762 getLocationOfByte(Amt.getStart()), 8763 /*IsStringLocation*/true, 8764 getSpecifierRange(startSpecifier, specifierLen), 8765 fixit); 8766 } 8767 8768 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8769 const analyze_printf::OptionalFlag &flag, 8770 const char *startSpecifier, 8771 unsigned specifierLen) { 8772 // Warn about pointless flag with a fixit removal. 8773 const analyze_printf::PrintfConversionSpecifier &CS = 8774 FS.getConversionSpecifier(); 8775 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 8776 << flag.toString() << CS.toString(), 8777 getLocationOfByte(flag.getPosition()), 8778 /*IsStringLocation*/true, 8779 getSpecifierRange(startSpecifier, specifierLen), 8780 FixItHint::CreateRemoval( 8781 getSpecifierRange(flag.getPosition(), 1))); 8782 } 8783 8784 void CheckPrintfHandler::HandleIgnoredFlag( 8785 const analyze_printf::PrintfSpecifier &FS, 8786 const analyze_printf::OptionalFlag &ignoredFlag, 8787 const analyze_printf::OptionalFlag &flag, 8788 const char *startSpecifier, 8789 unsigned specifierLen) { 8790 // Warn about ignored flag with a fixit removal. 8791 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 8792 << ignoredFlag.toString() << flag.toString(), 8793 getLocationOfByte(ignoredFlag.getPosition()), 8794 /*IsStringLocation*/true, 8795 getSpecifierRange(startSpecifier, specifierLen), 8796 FixItHint::CreateRemoval( 8797 getSpecifierRange(ignoredFlag.getPosition(), 1))); 8798 } 8799 8800 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 8801 unsigned flagLen) { 8802 // Warn about an empty flag. 8803 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 8804 getLocationOfByte(startFlag), 8805 /*IsStringLocation*/true, 8806 getSpecifierRange(startFlag, flagLen)); 8807 } 8808 8809 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 8810 unsigned flagLen) { 8811 // Warn about an invalid flag. 
8812 auto Range = getSpecifierRange(startFlag, flagLen);
8813 StringRef flag(startFlag, flagLen);
8814 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
8815 getLocationOfByte(startFlag),
8816 /*IsStringLocation*/true,
8817 Range, FixItHint::CreateRemoval(Range));
8818 }
8819
8820 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
8821 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
8822 // Warn about using '[...]' without a '@' conversion.
8823 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
8824 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
8825 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
8826 getLocationOfByte(conversionPosition),
8827 /*IsStringLocation*/true,
8828 Range, FixItHint::CreateRemoval(Range));
8829 }
8830
8831 // Determines if the specified type is a C++ class or struct containing
8832 // a member with the specified name and kind (e.g. a CXXMethodDecl named
8833 // "c_str()").
8834 template<typename MemberKind>
8835 static llvm::SmallPtrSet<MemberKind*, 1>
8836 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
8837 const RecordType *RT = Ty->getAs<RecordType>();
8838 llvm::SmallPtrSet<MemberKind*, 1> Results;
8839
8840 if (!RT)
8841 return Results;
8842 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
8843 if (!RD || !RD->getDefinition())
8844 return Results;
8845
8846 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
8847 Sema::LookupMemberName);
8848 R.suppressDiagnostics();
8849
8850 // At this point, we just need to collect all members of the right kind that
8851 // the lookup turned up.
8852 if (S.LookupQualifiedName(R, RT->getDecl()))
8853 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
8854 NamedDecl *decl = (*I)->getUnderlyingDecl();
8855 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
8856 Results.insert(FK);
8857 }
8858 return Results;
8859 }
8860
8861 /// Check if we could call '.c_str()' on an object.
8862 ///
8863 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
8864 /// allow the call, or if it would be ambiguous).
8865 bool Sema::hasCStrMethod(const Expr *E) {
8866 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
8867
8868 MethodSet Results =
8869 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
8870 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
8871 MI != ME; ++MI)
8872 if ((*MI)->getMinRequiredArguments() == 0)
8873 return true;
8874 return false;
8875 }
8876
8877 // Check if a (w)string was passed when a (w)char* was needed, and offer a
8878 // better diagnostic if so. AT is assumed to be valid.
8879 // Returns true when a c_str() conversion method is found.
8880 bool CheckPrintfHandler::checkForCStrMembers(
8881 const analyze_printf::ArgType &AT, const Expr *E) {
8882 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
8883
8884 MethodSet Results =
8885 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());
8886
8887 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
8888 MI != ME; ++MI) {
8889 const CXXMethodDecl *Method = *MI;
8890 if (Method->getMinRequiredArguments() == 0 &&
8891 AT.matchesType(S.Context, Method->getReturnType())) {
8892 // FIXME: Suggest parens if the expression needs them.
8893 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8894 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8895 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8896 return true; 8897 } 8898 } 8899 8900 return false; 8901 } 8902 8903 bool 8904 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8905 &FS, 8906 const char *startSpecifier, 8907 unsigned specifierLen) { 8908 using namespace analyze_format_string; 8909 using namespace analyze_printf; 8910 8911 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8912 8913 if (FS.consumesDataArgument()) { 8914 if (atFirstArg) { 8915 atFirstArg = false; 8916 usesPositionalArgs = FS.usesPositionalArg(); 8917 } 8918 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8919 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8920 startSpecifier, specifierLen); 8921 return false; 8922 } 8923 } 8924 8925 // First check if the field width, precision, and conversion specifier 8926 // have matching data arguments. 8927 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 8928 startSpecifier, specifierLen)) { 8929 return false; 8930 } 8931 8932 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 8933 startSpecifier, specifierLen)) { 8934 return false; 8935 } 8936 8937 if (!CS.consumesDataArgument()) { 8938 // FIXME: Technically specifying a precision or field width here 8939 // makes no sense. Worth issuing a warning at some point. 8940 return true; 8941 } 8942 8943 // Consume the argument. 8944 unsigned argIndex = FS.getArgIndex(); 8945 if (argIndex < NumDataArgs) { 8946 // The check to see if the argIndex is valid will come later. 8947 // We set the bit here because we may exit early from this 8948 // function if we encounter some other error. 8949 CoveredArgs.set(argIndex); 8950 } 8951 8952 // FreeBSD kernel extensions. 8953 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8954 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8955 // We need at least two arguments. 8956 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8957 return false; 8958 8959 // Claim the second argument. 8960 CoveredArgs.set(argIndex + 1); 8961 8962 // Type check the first argument (int for %b, pointer for %D) 8963 const Expr *Ex = getDataArg(argIndex); 8964 const analyze_printf::ArgType &AT = 8965 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8966 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8967 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8968 EmitFormatDiagnostic( 8969 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8970 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8971 << false << Ex->getSourceRange(), 8972 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8973 getSpecifierRange(startSpecifier, specifierLen)); 8974 8975 // Type check the second argument (char * for both %b and %D) 8976 Ex = getDataArg(argIndex + 1); 8977 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8978 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8979 EmitFormatDiagnostic( 8980 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8981 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8982 << false << Ex->getSourceRange(), 8983 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8984 getSpecifierRange(startSpecifier, specifierLen)); 8985 8986 return true; 8987 } 8988 8989 // Check for using an Objective-C specific conversion specifier 8990 // in a non-ObjC literal. 
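// For example, '%@' appearing in a plain C printf format string (rather than
// in an NSString, os_log, or os_trace format) is diagnosed here as an invalid
// conversion.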
8991 if (!allowsObjCArg() && CS.isObjCArg()) { 8992 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8993 specifierLen); 8994 } 8995 8996 // %P can only be used with os_log. 8997 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8998 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8999 specifierLen); 9000 } 9001 9002 // %n is not allowed with os_log. 9003 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 9004 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 9005 getLocationOfByte(CS.getStart()), 9006 /*IsStringLocation*/ false, 9007 getSpecifierRange(startSpecifier, specifierLen)); 9008 9009 return true; 9010 } 9011 9012 // Only scalars are allowed for os_trace. 9013 if (FSType == Sema::FST_OSTrace && 9014 (CS.getKind() == ConversionSpecifier::PArg || 9015 CS.getKind() == ConversionSpecifier::sArg || 9016 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 9017 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 9018 specifierLen); 9019 } 9020 9021 // Check for use of public/private annotation outside of os_log(). 9022 if (FSType != Sema::FST_OSLog) { 9023 if (FS.isPublic().isSet()) { 9024 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9025 << "public", 9026 getLocationOfByte(FS.isPublic().getPosition()), 9027 /*IsStringLocation*/ false, 9028 getSpecifierRange(startSpecifier, specifierLen)); 9029 } 9030 if (FS.isPrivate().isSet()) { 9031 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 9032 << "private", 9033 getLocationOfByte(FS.isPrivate().getPosition()), 9034 /*IsStringLocation*/ false, 9035 getSpecifierRange(startSpecifier, specifierLen)); 9036 } 9037 } 9038 9039 // Check for invalid use of field width 9040 if (!FS.hasValidFieldWidth()) { 9041 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 9042 startSpecifier, specifierLen); 9043 } 9044 9045 // Check for invalid use of precision 9046 if (!FS.hasValidPrecision()) { 9047 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 9048 startSpecifier, specifierLen); 9049 } 9050 9051 // Precision is mandatory for %P specifier. 9052 if (CS.getKind() == ConversionSpecifier::PArg && 9053 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 9054 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 9055 getLocationOfByte(startSpecifier), 9056 /*IsStringLocation*/ false, 9057 getSpecifierRange(startSpecifier, specifierLen)); 9058 } 9059 9060 // Check each flag does not conflict with any other component. 
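// For example, in 'printf("%+s", str)' the '+' flag is meaningless for the
// 's' conversion, so it is diagnosed below with a removal fixit.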
9061 if (!FS.hasValidThousandsGroupingPrefix()) 9062 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 9063 if (!FS.hasValidLeadingZeros()) 9064 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 9065 if (!FS.hasValidPlusPrefix()) 9066 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 9067 if (!FS.hasValidSpacePrefix()) 9068 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 9069 if (!FS.hasValidAlternativeForm()) 9070 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 9071 if (!FS.hasValidLeftJustified()) 9072 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 9073 9074 // Check that flags are not ignored by another flag 9075 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 9076 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 9077 startSpecifier, specifierLen); 9078 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 9079 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 9080 startSpecifier, specifierLen); 9081 9082 // Check the length modifier is valid with the given conversion specifier. 9083 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9084 S.getLangOpts())) 9085 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9086 diag::warn_format_nonsensical_length); 9087 else if (!FS.hasStandardLengthModifier()) 9088 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9089 else if (!FS.hasStandardLengthConversionCombination()) 9090 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9091 diag::warn_format_non_standard_conversion_spec); 9092 9093 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9094 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9095 9096 // The remaining checks depend on the data arguments. 9097 if (HasVAListArg) 9098 return true; 9099 9100 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9101 return false; 9102 9103 const Expr *Arg = getDataArg(argIndex); 9104 if (!Arg) 9105 return true; 9106 9107 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 9108 } 9109 9110 static bool requiresParensToAddCast(const Expr *E) { 9111 // FIXME: We should have a general way to reason about operator 9112 // precedence and whether parens are actually needed here. 9113 // Take care of a few common cases where they aren't. 
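// For example, a DeclRefExpr such as 'x' can be cast as '(NSInteger)x'
// without parens, while a binary expression such as 'a + b' needs them:
// '(NSInteger)(a + b)'.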
9114 const Expr *Inside = E->IgnoreImpCasts(); 9115 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 9116 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 9117 9118 switch (Inside->getStmtClass()) { 9119 case Stmt::ArraySubscriptExprClass: 9120 case Stmt::CallExprClass: 9121 case Stmt::CharacterLiteralClass: 9122 case Stmt::CXXBoolLiteralExprClass: 9123 case Stmt::DeclRefExprClass: 9124 case Stmt::FloatingLiteralClass: 9125 case Stmt::IntegerLiteralClass: 9126 case Stmt::MemberExprClass: 9127 case Stmt::ObjCArrayLiteralClass: 9128 case Stmt::ObjCBoolLiteralExprClass: 9129 case Stmt::ObjCBoxedExprClass: 9130 case Stmt::ObjCDictionaryLiteralClass: 9131 case Stmt::ObjCEncodeExprClass: 9132 case Stmt::ObjCIvarRefExprClass: 9133 case Stmt::ObjCMessageExprClass: 9134 case Stmt::ObjCPropertyRefExprClass: 9135 case Stmt::ObjCStringLiteralClass: 9136 case Stmt::ObjCSubscriptRefExprClass: 9137 case Stmt::ParenExprClass: 9138 case Stmt::StringLiteralClass: 9139 case Stmt::UnaryOperatorClass: 9140 return false; 9141 default: 9142 return true; 9143 } 9144 } 9145 9146 static std::pair<QualType, StringRef> 9147 shouldNotPrintDirectly(const ASTContext &Context, 9148 QualType IntendedTy, 9149 const Expr *E) { 9150 // Use a 'while' to peel off layers of typedefs. 9151 QualType TyTy = IntendedTy; 9152 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9153 StringRef Name = UserTy->getDecl()->getName(); 9154 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9155 .Case("CFIndex", Context.getNSIntegerType()) 9156 .Case("NSInteger", Context.getNSIntegerType()) 9157 .Case("NSUInteger", Context.getNSUIntegerType()) 9158 .Case("SInt32", Context.IntTy) 9159 .Case("UInt32", Context.UnsignedIntTy) 9160 .Default(QualType()); 9161 9162 if (!CastTy.isNull()) 9163 return std::make_pair(CastTy, Name); 9164 9165 TyTy = UserTy->desugar(); 9166 } 9167 9168 // Strip parens if necessary. 9169 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9170 return shouldNotPrintDirectly(Context, 9171 PE->getSubExpr()->getType(), 9172 PE->getSubExpr()); 9173 9174 // If this is a conditional expression, then its result type is constructed 9175 // via usual arithmetic conversions and thus there might be no necessary 9176 // typedef sugar there. Recurse to operands to check for NSInteger & 9177 // Co. usage condition. 9178 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9179 QualType TrueTy, FalseTy; 9180 StringRef TrueName, FalseName; 9181 9182 std::tie(TrueTy, TrueName) = 9183 shouldNotPrintDirectly(Context, 9184 CO->getTrueExpr()->getType(), 9185 CO->getTrueExpr()); 9186 std::tie(FalseTy, FalseName) = 9187 shouldNotPrintDirectly(Context, 9188 CO->getFalseExpr()->getType(), 9189 CO->getFalseExpr()); 9190 9191 if (TrueTy == FalseTy) 9192 return std::make_pair(TrueTy, TrueName); 9193 else if (TrueTy.isNull()) 9194 return std::make_pair(FalseTy, FalseName); 9195 else if (FalseTy.isNull()) 9196 return std::make_pair(TrueTy, TrueName); 9197 } 9198 9199 return std::make_pair(QualType(), StringRef()); 9200 } 9201 9202 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9203 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9204 /// type do not count. 9205 static bool 9206 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9207 QualType From = ICE->getSubExpr()->getType(); 9208 QualType To = ICE->getType(); 9209 // It's an integer promotion if the destination type is the promoted 9210 // source type. 
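// For example, a 'short' argument to a variadic function is promoted to
// 'int', and a 'float' argument is promoted to 'double'.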
9211 if (ICE->getCastKind() == CK_IntegralCast && 9212 From->isPromotableIntegerType() && 9213 S.Context.getPromotedIntegerType(From) == To) 9214 return true; 9215 // Look through vector types, since we do default argument promotion for 9216 // those in OpenCL. 9217 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9218 From = VecTy->getElementType(); 9219 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9220 To = VecTy->getElementType(); 9221 // It's a floating promotion if the source type is a lower rank. 9222 return ICE->getCastKind() == CK_FloatingCast && 9223 S.Context.getFloatingTypeOrder(From, To) < 0; 9224 } 9225 9226 bool 9227 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9228 const char *StartSpecifier, 9229 unsigned SpecifierLen, 9230 const Expr *E) { 9231 using namespace analyze_format_string; 9232 using namespace analyze_printf; 9233 9234 // Now type check the data expression that matches the 9235 // format specifier. 9236 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9237 if (!AT.isValid()) 9238 return true; 9239 9240 QualType ExprTy = E->getType(); 9241 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9242 ExprTy = TET->getUnderlyingExpr()->getType(); 9243 } 9244 9245 // Diagnose attempts to print a boolean value as a character. Unlike other 9246 // -Wformat diagnostics, this is fine from a type perspective, but it still 9247 // doesn't make sense. 9248 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9249 E->isKnownToHaveBooleanValue()) { 9250 const CharSourceRange &CSR = 9251 getSpecifierRange(StartSpecifier, SpecifierLen); 9252 SmallString<4> FSString; 9253 llvm::raw_svector_ostream os(FSString); 9254 FS.toString(os); 9255 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9256 << FSString, 9257 E->getExprLoc(), false, CSR); 9258 return true; 9259 } 9260 9261 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9262 if (Match == analyze_printf::ArgType::Match) 9263 return true; 9264 9265 // Look through argument promotions for our error message's reported type. 9266 // This includes the integral and floating promotions, but excludes array 9267 // and function pointer decay (seeing that an argument intended to be a 9268 // string has type 'char [6]' is probably more confusing than 'char *') and 9269 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9270 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9271 if (isArithmeticArgumentPromotion(S, ICE)) { 9272 E = ICE->getSubExpr(); 9273 ExprTy = E->getType(); 9274 9275 // Check if we didn't match because of an implicit cast from a 'char' 9276 // or 'short' to an 'int'. This is done because printf is a varargs 9277 // function. 9278 if (ICE->getType() == S.Context.IntTy || 9279 ICE->getType() == S.Context.UnsignedIntTy) { 9280 // All further checking is done on the subexpression 9281 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9282 AT.matchesType(S.Context, ExprTy); 9283 if (ImplicitMatch == analyze_printf::ArgType::Match) 9284 return true; 9285 if (ImplicitMatch == ArgType::NoMatchPedantic || 9286 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9287 Match = ImplicitMatch; 9288 } 9289 } 9290 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 9291 // Special case for 'a', which has type 'int' in C. 
9292 // Note, however, that we do /not/ want to treat multibyte constants like 9293 // 'MooV' as characters! This form is deprecated but still exists. In 9294 // addition, don't treat expressions as of type 'char' if one byte length 9295 // modifier is provided. 9296 if (ExprTy == S.Context.IntTy && 9297 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 9298 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 9299 ExprTy = S.Context.CharTy; 9300 } 9301 9302 // Look through enums to their underlying type. 9303 bool IsEnum = false; 9304 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 9305 ExprTy = EnumTy->getDecl()->getIntegerType(); 9306 IsEnum = true; 9307 } 9308 9309 // %C in an Objective-C context prints a unichar, not a wchar_t. 9310 // If the argument is an integer of some kind, believe the %C and suggest 9311 // a cast instead of changing the conversion specifier. 9312 QualType IntendedTy = ExprTy; 9313 if (isObjCContext() && 9314 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 9315 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 9316 !ExprTy->isCharType()) { 9317 // 'unichar' is defined as a typedef of unsigned short, but we should 9318 // prefer using the typedef if it is visible. 9319 IntendedTy = S.Context.UnsignedShortTy; 9320 9321 // While we are here, check if the value is an IntegerLiteral that happens 9322 // to be within the valid range. 9323 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 9324 const llvm::APInt &V = IL->getValue(); 9325 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 9326 return true; 9327 } 9328 9329 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 9330 Sema::LookupOrdinaryName); 9331 if (S.LookupName(Result, S.getCurScope())) { 9332 NamedDecl *ND = Result.getFoundDecl(); 9333 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 9334 if (TD->getUnderlyingType() == IntendedTy) 9335 IntendedTy = S.Context.getTypedefType(TD); 9336 } 9337 } 9338 } 9339 9340 // Special-case some of Darwin's platform-independence types by suggesting 9341 // casts to primitive types that are known to be large enough. 9342 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 9343 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 9344 QualType CastTy; 9345 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 9346 if (!CastTy.isNull()) { 9347 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 9348 // (long in ASTContext). Only complain to pedants. 9349 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 9350 (AT.isSizeT() || AT.isPtrdiffT()) && 9351 AT.matchesType(S.Context, CastTy)) 9352 Match = ArgType::NoMatchPedantic; 9353 IntendedTy = CastTy; 9354 ShouldNotPrintDirectly = true; 9355 } 9356 } 9357 9358 // We may be able to offer a FixItHint if it is a supported type. 
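// For example, 'printf("%d", someLongValue)' can be rewritten to use '%ld'.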
9359 PrintfSpecifier fixedFS = FS; 9360 bool Success = 9361 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 9362 9363 if (Success) { 9364 // Get the fix string from the fixed format specifier 9365 SmallString<16> buf; 9366 llvm::raw_svector_ostream os(buf); 9367 fixedFS.toString(os); 9368 9369 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 9370 9371 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 9372 unsigned Diag; 9373 switch (Match) { 9374 case ArgType::Match: llvm_unreachable("expected non-matching"); 9375 case ArgType::NoMatchPedantic: 9376 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9377 break; 9378 case ArgType::NoMatchTypeConfusion: 9379 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9380 break; 9381 case ArgType::NoMatch: 9382 Diag = diag::warn_format_conversion_argument_type_mismatch; 9383 break; 9384 } 9385 9386 // In this case, the specifier is wrong and should be changed to match 9387 // the argument. 9388 EmitFormatDiagnostic(S.PDiag(Diag) 9389 << AT.getRepresentativeTypeName(S.Context) 9390 << IntendedTy << IsEnum << E->getSourceRange(), 9391 E->getBeginLoc(), 9392 /*IsStringLocation*/ false, SpecRange, 9393 FixItHint::CreateReplacement(SpecRange, os.str())); 9394 } else { 9395 // The canonical type for formatting this value is different from the 9396 // actual type of the expression. (This occurs, for example, with Darwin's 9397 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 9398 // should be printed as 'long' for 64-bit compatibility.) 9399 // Rather than emitting a normal format/argument mismatch, we want to 9400 // add a cast to the recommended type (and correct the format string 9401 // if necessary). 9402 SmallString<16> CastBuf; 9403 llvm::raw_svector_ostream CastFix(CastBuf); 9404 CastFix << "("; 9405 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 9406 CastFix << ")"; 9407 9408 SmallVector<FixItHint,4> Hints; 9409 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 9410 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 9411 9412 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 9413 // If there's already a cast present, just replace it. 9414 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 9415 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 9416 9417 } else if (!requiresParensToAddCast(E)) { 9418 // If the expression has high enough precedence, 9419 // just write the C-style cast. 9420 Hints.push_back( 9421 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9422 } else { 9423 // Otherwise, add parens around the expression as well as the cast. 9424 CastFix << "("; 9425 Hints.push_back( 9426 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9427 9428 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 9429 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 9430 } 9431 9432 if (ShouldNotPrintDirectly) { 9433 // The expression has a type that should not be printed directly. 9434 // We extract the name from the typedef because we don't want to show 9435 // the underlying type in the diagnostic. 9436 StringRef Name; 9437 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 9438 Name = TypedefTy->getDecl()->getName(); 9439 else 9440 Name = CastTyName; 9441 unsigned Diag = Match == ArgType::NoMatchPedantic 9442 ? 
diag::warn_format_argument_needs_cast_pedantic 9443 : diag::warn_format_argument_needs_cast; 9444 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 9445 << E->getSourceRange(), 9446 E->getBeginLoc(), /*IsStringLocation=*/false, 9447 SpecRange, Hints); 9448 } else { 9449 // In this case, the expression could be printed using a different 9450 // specifier, but we've decided that the specifier is probably correct 9451 // and we should cast instead. Just use the normal warning message. 9452 EmitFormatDiagnostic( 9453 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9454 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 9455 << E->getSourceRange(), 9456 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 9457 } 9458 } 9459 } else { 9460 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 9461 SpecifierLen); 9462 // Since the warning for passing non-POD types to variadic functions 9463 // was deferred until now, we emit a warning for non-POD 9464 // arguments here. 9465 switch (S.isValidVarArgType(ExprTy)) { 9466 case Sema::VAK_Valid: 9467 case Sema::VAK_ValidInCXX11: { 9468 unsigned Diag; 9469 switch (Match) { 9470 case ArgType::Match: llvm_unreachable("expected non-matching"); 9471 case ArgType::NoMatchPedantic: 9472 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9473 break; 9474 case ArgType::NoMatchTypeConfusion: 9475 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9476 break; 9477 case ArgType::NoMatch: 9478 Diag = diag::warn_format_conversion_argument_type_mismatch; 9479 break; 9480 } 9481 9482 EmitFormatDiagnostic( 9483 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9484 << IsEnum << CSR << E->getSourceRange(), 9485 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9486 break; 9487 } 9488 case Sema::VAK_Undefined: 9489 case Sema::VAK_MSVCUndefined: 9490 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9491 << S.getLangOpts().CPlusPlus11 << ExprTy 9492 << CallType 9493 << AT.getRepresentativeTypeName(S.Context) << CSR 9494 << E->getSourceRange(), 9495 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9496 checkForCStrMembers(AT, E); 9497 break; 9498 9499 case Sema::VAK_Invalid: 9500 if (ExprTy->isObjCObjectType()) 9501 EmitFormatDiagnostic( 9502 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9503 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9504 << AT.getRepresentativeTypeName(S.Context) << CSR 9505 << E->getSourceRange(), 9506 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9507 else 9508 // FIXME: If this is an initializer list, suggest removing the braces 9509 // or inserting a cast to the target type. 
9510 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
9511 << isa<InitListExpr>(E) << ExprTy << CallType
9512 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
9513 break;
9514 }
9515
9516 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
9517 "format string specifier index out of range");
9518 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
9519 }
9520
9521 return true;
9522 }
9523
9524 //===--- CHECK: Scanf format string checking ------------------------------===//
9525
9526 namespace {
9527
9528 class CheckScanfHandler : public CheckFormatHandler {
9529 public:
9530 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
9531 const Expr *origFormatExpr, Sema::FormatStringType type,
9532 unsigned firstDataArg, unsigned numDataArgs,
9533 const char *beg, bool hasVAListArg,
9534 ArrayRef<const Expr *> Args, unsigned formatIdx,
9535 bool inFunctionCall, Sema::VariadicCallType CallType,
9536 llvm::SmallBitVector &CheckedVarArgs,
9537 UncoveredArgHandler &UncoveredArg)
9538 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
9539 numDataArgs, beg, hasVAListArg, Args, formatIdx,
9540 inFunctionCall, CallType, CheckedVarArgs,
9541 UncoveredArg) {}
9542
9543 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
9544 const char *startSpecifier,
9545 unsigned specifierLen) override;
9546
9547 bool HandleInvalidScanfConversionSpecifier(
9548 const analyze_scanf::ScanfSpecifier &FS,
9549 const char *startSpecifier,
9550 unsigned specifierLen) override;
9551
9552 void HandleIncompleteScanList(const char *start, const char *end) override;
9553 };
9554
9555 } // namespace
9556
9557 void CheckScanfHandler::HandleIncompleteScanList(const char *start,
9558 const char *end) {
9559 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
9560 getLocationOfByte(end), /*IsStringLocation*/true,
9561 getSpecifierRange(start, end - start));
9562 }
9563
9564 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
9565 const analyze_scanf::ScanfSpecifier &FS,
9566 const char *startSpecifier,
9567 unsigned specifierLen) {
9568 const analyze_scanf::ScanfConversionSpecifier &CS =
9569 FS.getConversionSpecifier();
9570
9571 return HandleInvalidConversionSpecifier(FS.getArgIndex(),
9572 getLocationOfByte(CS.getStart()),
9573 startSpecifier, specifierLen,
9574 CS.getStart(), CS.getLength());
9575 }
9576
9577 bool CheckScanfHandler::HandleScanfSpecifier(
9578 const analyze_scanf::ScanfSpecifier &FS,
9579 const char *startSpecifier,
9580 unsigned specifierLen) {
9581 using namespace analyze_scanf;
9582 using namespace analyze_format_string;
9583
9584 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
9585
9586 // Handle the case where '%' and '*' don't consume an argument. These shouldn't
9587 // be used to decide whether we are using positional arguments consistently.
9588 if (FS.consumesDataArgument()) {
9589 if (atFirstArg) {
9590 atFirstArg = false;
9591 usesPositionalArgs = FS.usesPositionalArg();
9592 }
9593 else if (usesPositionalArgs != FS.usesPositionalArg()) {
9594 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
9595 startSpecifier, specifierLen);
9596 return false;
9597 }
9598 }
9599
9600 // Check if the field width is non-zero.
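// For example, 'scanf("%0d", &n)' specifies a zero field width, which is not
// allowed; the fixit below removes it.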
9601 const OptionalAmount &Amt = FS.getFieldWidth(); 9602 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9603 if (Amt.getConstantAmount() == 0) { 9604 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9605 Amt.getConstantLength()); 9606 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9607 getLocationOfByte(Amt.getStart()), 9608 /*IsStringLocation*/true, R, 9609 FixItHint::CreateRemoval(R)); 9610 } 9611 } 9612 9613 if (!FS.consumesDataArgument()) { 9614 // FIXME: Technically specifying a precision or field width here 9615 // makes no sense. Worth issuing a warning at some point. 9616 return true; 9617 } 9618 9619 // Consume the argument. 9620 unsigned argIndex = FS.getArgIndex(); 9621 if (argIndex < NumDataArgs) { 9622 // The check to see if the argIndex is valid will come later. 9623 // We set the bit here because we may exit early from this 9624 // function if we encounter some other error. 9625 CoveredArgs.set(argIndex); 9626 } 9627 9628 // Check the length modifier is valid with the given conversion specifier. 9629 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9630 S.getLangOpts())) 9631 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9632 diag::warn_format_nonsensical_length); 9633 else if (!FS.hasStandardLengthModifier()) 9634 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9635 else if (!FS.hasStandardLengthConversionCombination()) 9636 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9637 diag::warn_format_non_standard_conversion_spec); 9638 9639 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9640 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9641 9642 // The remaining checks depend on the data arguments. 9643 if (HasVAListArg) 9644 return true; 9645 9646 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9647 return false; 9648 9649 // Check that the argument type matches the format specifier. 9650 const Expr *Ex = getDataArg(argIndex); 9651 if (!Ex) 9652 return true; 9653 9654 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9655 9656 if (!AT.isValid()) { 9657 return true; 9658 } 9659 9660 analyze_format_string::ArgType::MatchKind Match = 9661 AT.matchesType(S.Context, Ex->getType()); 9662 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9663 if (Match == analyze_format_string::ArgType::Match) 9664 return true; 9665 9666 ScanfSpecifier fixedFS = FS; 9667 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9668 S.getLangOpts(), S.Context); 9669 9670 unsigned Diag = 9671 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9672 : diag::warn_format_conversion_argument_type_mismatch; 9673 9674 if (Success) { 9675 // Get the fix string from the fixed format specifier. 
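// For example, 'scanf("%f", &someDoubleValue)' can be corrected to use '%lf'.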
9676 SmallString<128> buf; 9677 llvm::raw_svector_ostream os(buf); 9678 fixedFS.toString(os); 9679 9680 EmitFormatDiagnostic( 9681 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9682 << Ex->getType() << false << Ex->getSourceRange(), 9683 Ex->getBeginLoc(), 9684 /*IsStringLocation*/ false, 9685 getSpecifierRange(startSpecifier, specifierLen), 9686 FixItHint::CreateReplacement( 9687 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9688 } else { 9689 EmitFormatDiagnostic(S.PDiag(Diag) 9690 << AT.getRepresentativeTypeName(S.Context) 9691 << Ex->getType() << false << Ex->getSourceRange(), 9692 Ex->getBeginLoc(), 9693 /*IsStringLocation*/ false, 9694 getSpecifierRange(startSpecifier, specifierLen)); 9695 } 9696 9697 return true; 9698 } 9699 9700 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9701 const Expr *OrigFormatExpr, 9702 ArrayRef<const Expr *> Args, 9703 bool HasVAListArg, unsigned format_idx, 9704 unsigned firstDataArg, 9705 Sema::FormatStringType Type, 9706 bool inFunctionCall, 9707 Sema::VariadicCallType CallType, 9708 llvm::SmallBitVector &CheckedVarArgs, 9709 UncoveredArgHandler &UncoveredArg, 9710 bool IgnoreStringsWithoutSpecifiers) { 9711 // CHECK: is the format string a wide literal? 9712 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 9713 CheckFormatHandler::EmitFormatDiagnostic( 9714 S, inFunctionCall, Args[format_idx], 9715 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 9716 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9717 return; 9718 } 9719 9720 // Str - The format string. NOTE: this is NOT null-terminated! 9721 StringRef StrRef = FExpr->getString(); 9722 const char *Str = StrRef.data(); 9723 // Account for cases where the string literal is truncated in a declaration. 9724 const ConstantArrayType *T = 9725 S.Context.getAsConstantArrayType(FExpr->getType()); 9726 assert(T && "String literal not of constant array type!"); 9727 size_t TypeSize = T->getSize().getZExtValue(); 9728 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9729 const unsigned numDataArgs = Args.size() - firstDataArg; 9730 9731 if (IgnoreStringsWithoutSpecifiers && 9732 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 9733 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 9734 return; 9735 9736 // Emit a warning if the string literal is truncated and does not contain an 9737 // embedded null character. 9738 if (TypeSize <= StrRef.size() && 9739 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 9740 CheckFormatHandler::EmitFormatDiagnostic( 9741 S, inFunctionCall, Args[format_idx], 9742 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 9743 FExpr->getBeginLoc(), 9744 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 9745 return; 9746 } 9747 9748 // CHECK: empty format string? 
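// For example, 'printf("", 42)' supplies a data argument but nothing to
// format it with.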
9749 if (StrLen == 0 && numDataArgs > 0) {
9750 CheckFormatHandler::EmitFormatDiagnostic(
9751 S, inFunctionCall, Args[format_idx],
9752 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
9753 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
9754 return;
9755 }
9756
9757 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
9758 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
9759 Type == Sema::FST_OSTrace) {
9760 CheckPrintfHandler H(
9761 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
9762 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
9763 HasVAListArg, Args, format_idx, inFunctionCall, CallType,
9764 CheckedVarArgs, UncoveredArg);
9765
9766 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
9767 S.getLangOpts(),
9768 S.Context.getTargetInfo(),
9769 Type == Sema::FST_FreeBSDKPrintf))
9770 H.DoneProcessing();
9771 } else if (Type == Sema::FST_Scanf) {
9772 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
9773 numDataArgs, Str, HasVAListArg, Args, format_idx,
9774 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
9775
9776 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
9777 S.getLangOpts(),
9778 S.Context.getTargetInfo()))
9779 H.DoneProcessing();
9780 } // TODO: handle other formats
9781 }
9782
9783 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
9784 // Str - The format string. NOTE: this is NOT null-terminated!
9785 StringRef StrRef = FExpr->getString();
9786 const char *Str = StrRef.data();
9787 // Account for cases where the string literal is truncated in a declaration.
9788 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
9789 assert(T && "String literal not of constant array type!");
9790 size_t TypeSize = T->getSize().getZExtValue();
9791 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
9792 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
9793 getLangOpts(),
9794 Context.getTargetInfo());
9795 }
9796
9797 //===--- CHECK: Warn on use of wrong absolute value function. -------------===//
9798
9799 // Returns the related absolute value function that is larger, or 0 if one
9800 // does not exist.
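// For example, the next larger function after 'abs' is 'labs', and after
// 'fabsf' it is 'fabs'.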
9801 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 9802 switch (AbsFunction) { 9803 default: 9804 return 0; 9805 9806 case Builtin::BI__builtin_abs: 9807 return Builtin::BI__builtin_labs; 9808 case Builtin::BI__builtin_labs: 9809 return Builtin::BI__builtin_llabs; 9810 case Builtin::BI__builtin_llabs: 9811 return 0; 9812 9813 case Builtin::BI__builtin_fabsf: 9814 return Builtin::BI__builtin_fabs; 9815 case Builtin::BI__builtin_fabs: 9816 return Builtin::BI__builtin_fabsl; 9817 case Builtin::BI__builtin_fabsl: 9818 return 0; 9819 9820 case Builtin::BI__builtin_cabsf: 9821 return Builtin::BI__builtin_cabs; 9822 case Builtin::BI__builtin_cabs: 9823 return Builtin::BI__builtin_cabsl; 9824 case Builtin::BI__builtin_cabsl: 9825 return 0; 9826 9827 case Builtin::BIabs: 9828 return Builtin::BIlabs; 9829 case Builtin::BIlabs: 9830 return Builtin::BIllabs; 9831 case Builtin::BIllabs: 9832 return 0; 9833 9834 case Builtin::BIfabsf: 9835 return Builtin::BIfabs; 9836 case Builtin::BIfabs: 9837 return Builtin::BIfabsl; 9838 case Builtin::BIfabsl: 9839 return 0; 9840 9841 case Builtin::BIcabsf: 9842 return Builtin::BIcabs; 9843 case Builtin::BIcabs: 9844 return Builtin::BIcabsl; 9845 case Builtin::BIcabsl: 9846 return 0; 9847 } 9848 } 9849 9850 // Returns the argument type of the absolute value function. 9851 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9852 unsigned AbsType) { 9853 if (AbsType == 0) 9854 return QualType(); 9855 9856 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9857 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9858 if (Error != ASTContext::GE_None) 9859 return QualType(); 9860 9861 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9862 if (!FT) 9863 return QualType(); 9864 9865 if (FT->getNumParams() != 1) 9866 return QualType(); 9867 9868 return FT->getParamType(0); 9869 } 9870 9871 // Returns the best absolute value function, or zero, based on type and 9872 // current absolute value function. 9873 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9874 unsigned AbsFunctionKind) { 9875 unsigned BestKind = 0; 9876 uint64_t ArgSize = Context.getTypeSize(ArgType); 9877 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9878 Kind = getLargerAbsoluteValueFunction(Kind)) { 9879 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9880 if (Context.getTypeSize(ParamType) >= ArgSize) { 9881 if (BestKind == 0) 9882 BestKind = Kind; 9883 else if (Context.hasSameType(ParamType, ArgType)) { 9884 BestKind = Kind; 9885 break; 9886 } 9887 } 9888 } 9889 return BestKind; 9890 } 9891 9892 enum AbsoluteValueKind { 9893 AVK_Integer, 9894 AVK_Floating, 9895 AVK_Complex 9896 }; 9897 9898 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9899 if (T->isIntegralOrEnumerationType()) 9900 return AVK_Integer; 9901 if (T->isRealFloatingType()) 9902 return AVK_Floating; 9903 if (T->isAnyComplexType()) 9904 return AVK_Complex; 9905 9906 llvm_unreachable("Type not integer, floating, or complex"); 9907 } 9908 9909 // Changes the absolute value function to a different type. Preserves whether 9910 // the function is a builtin. 
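// For example, 'fabsf' requested for an integer argument becomes 'abs', and
// '__builtin_abs' requested for a floating-point argument becomes
// '__builtin_fabsf'.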
9911 static unsigned changeAbsFunction(unsigned AbsKind, 9912 AbsoluteValueKind ValueKind) { 9913 switch (ValueKind) { 9914 case AVK_Integer: 9915 switch (AbsKind) { 9916 default: 9917 return 0; 9918 case Builtin::BI__builtin_fabsf: 9919 case Builtin::BI__builtin_fabs: 9920 case Builtin::BI__builtin_fabsl: 9921 case Builtin::BI__builtin_cabsf: 9922 case Builtin::BI__builtin_cabs: 9923 case Builtin::BI__builtin_cabsl: 9924 return Builtin::BI__builtin_abs; 9925 case Builtin::BIfabsf: 9926 case Builtin::BIfabs: 9927 case Builtin::BIfabsl: 9928 case Builtin::BIcabsf: 9929 case Builtin::BIcabs: 9930 case Builtin::BIcabsl: 9931 return Builtin::BIabs; 9932 } 9933 case AVK_Floating: 9934 switch (AbsKind) { 9935 default: 9936 return 0; 9937 case Builtin::BI__builtin_abs: 9938 case Builtin::BI__builtin_labs: 9939 case Builtin::BI__builtin_llabs: 9940 case Builtin::BI__builtin_cabsf: 9941 case Builtin::BI__builtin_cabs: 9942 case Builtin::BI__builtin_cabsl: 9943 return Builtin::BI__builtin_fabsf; 9944 case Builtin::BIabs: 9945 case Builtin::BIlabs: 9946 case Builtin::BIllabs: 9947 case Builtin::BIcabsf: 9948 case Builtin::BIcabs: 9949 case Builtin::BIcabsl: 9950 return Builtin::BIfabsf; 9951 } 9952 case AVK_Complex: 9953 switch (AbsKind) { 9954 default: 9955 return 0; 9956 case Builtin::BI__builtin_abs: 9957 case Builtin::BI__builtin_labs: 9958 case Builtin::BI__builtin_llabs: 9959 case Builtin::BI__builtin_fabsf: 9960 case Builtin::BI__builtin_fabs: 9961 case Builtin::BI__builtin_fabsl: 9962 return Builtin::BI__builtin_cabsf; 9963 case Builtin::BIabs: 9964 case Builtin::BIlabs: 9965 case Builtin::BIllabs: 9966 case Builtin::BIfabsf: 9967 case Builtin::BIfabs: 9968 case Builtin::BIfabsl: 9969 return Builtin::BIcabsf; 9970 } 9971 } 9972 llvm_unreachable("Unable to convert function"); 9973 } 9974 9975 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9976 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9977 if (!FnInfo) 9978 return 0; 9979 9980 switch (FDecl->getBuiltinID()) { 9981 default: 9982 return 0; 9983 case Builtin::BI__builtin_abs: 9984 case Builtin::BI__builtin_fabs: 9985 case Builtin::BI__builtin_fabsf: 9986 case Builtin::BI__builtin_fabsl: 9987 case Builtin::BI__builtin_labs: 9988 case Builtin::BI__builtin_llabs: 9989 case Builtin::BI__builtin_cabs: 9990 case Builtin::BI__builtin_cabsf: 9991 case Builtin::BI__builtin_cabsl: 9992 case Builtin::BIabs: 9993 case Builtin::BIlabs: 9994 case Builtin::BIllabs: 9995 case Builtin::BIfabs: 9996 case Builtin::BIfabsf: 9997 case Builtin::BIfabsl: 9998 case Builtin::BIcabs: 9999 case Builtin::BIcabsf: 10000 case Builtin::BIcabsl: 10001 return FDecl->getBuiltinID(); 10002 } 10003 llvm_unreachable("Unknown Builtin type"); 10004 } 10005 10006 // If the replacement is valid, emit a note with replacement function. 10007 // Additionally, suggest including the proper header if not already included. 
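// For example, in C++ the note may suggest 'std::abs' together with a hint to
// include <cstdlib> (for integer arguments) or <cmath> (for floating-point
// arguments).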
10008 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 10009 unsigned AbsKind, QualType ArgType) { 10010 bool EmitHeaderHint = true; 10011 const char *HeaderName = nullptr; 10012 const char *FunctionName = nullptr; 10013 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 10014 FunctionName = "std::abs"; 10015 if (ArgType->isIntegralOrEnumerationType()) { 10016 HeaderName = "cstdlib"; 10017 } else if (ArgType->isRealFloatingType()) { 10018 HeaderName = "cmath"; 10019 } else { 10020 llvm_unreachable("Invalid Type"); 10021 } 10022 10023 // Lookup all std::abs 10024 if (NamespaceDecl *Std = S.getStdNamespace()) { 10025 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 10026 R.suppressDiagnostics(); 10027 S.LookupQualifiedName(R, Std); 10028 10029 for (const auto *I : R) { 10030 const FunctionDecl *FDecl = nullptr; 10031 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 10032 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 10033 } else { 10034 FDecl = dyn_cast<FunctionDecl>(I); 10035 } 10036 if (!FDecl) 10037 continue; 10038 10039 // Found std::abs(), check that they are the right ones. 10040 if (FDecl->getNumParams() != 1) 10041 continue; 10042 10043 // Check that the parameter type can handle the argument. 10044 QualType ParamType = FDecl->getParamDecl(0)->getType(); 10045 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 10046 S.Context.getTypeSize(ArgType) <= 10047 S.Context.getTypeSize(ParamType)) { 10048 // Found a function, don't need the header hint. 10049 EmitHeaderHint = false; 10050 break; 10051 } 10052 } 10053 } 10054 } else { 10055 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 10056 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 10057 10058 if (HeaderName) { 10059 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 10060 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 10061 R.suppressDiagnostics(); 10062 S.LookupName(R, S.getCurScope()); 10063 10064 if (R.isSingleResult()) { 10065 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 10066 if (FD && FD->getBuiltinID() == AbsKind) { 10067 EmitHeaderHint = false; 10068 } else { 10069 return; 10070 } 10071 } else if (!R.empty()) { 10072 return; 10073 } 10074 } 10075 } 10076 10077 S.Diag(Loc, diag::note_replace_abs_function) 10078 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 10079 10080 if (!HeaderName) 10081 return; 10082 10083 if (!EmitHeaderHint) 10084 return; 10085 10086 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 10087 << FunctionName; 10088 } 10089 10090 template <std::size_t StrLen> 10091 static bool IsStdFunction(const FunctionDecl *FDecl, 10092 const char (&Str)[StrLen]) { 10093 if (!FDecl) 10094 return false; 10095 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 10096 return false; 10097 if (!FDecl->isInStdNamespace()) 10098 return false; 10099 10100 return true; 10101 } 10102 10103 // Warn when using the wrong abs() function. 
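// For example, calling 'abs' on a 'float' argument or 'fabsf' on a 'double'
// argument is diagnosed here, with a suggested replacement where one exists.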
10104 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
10105 const FunctionDecl *FDecl) {
10106 if (Call->getNumArgs() != 1)
10107 return;
10108
10109 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
10110 bool IsStdAbs = IsStdFunction(FDecl, "abs");
10111 if (AbsKind == 0 && !IsStdAbs)
10112 return;
10113
10114 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
10115 QualType ParamType = Call->getArg(0)->getType();
10116
10117 // Unsigned types cannot be negative. Suggest removing the absolute value
10118 // function call.
10119 if (ArgType->isUnsignedIntegerType()) {
10120 const char *FunctionName =
10121 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
10122 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
10123 Diag(Call->getExprLoc(), diag::note_remove_abs)
10124 << FunctionName
10125 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
10126 return;
10127 }
10128
10129 // Taking the absolute value of a pointer is very suspicious; the caller
10130 // probably wanted to index into an array, dereference a pointer, call a
10131 // function, etc.
10131 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
10132 unsigned DiagType = 0;
10133 if (ArgType->isFunctionType())
10134 DiagType = 1;
10135 else if (ArgType->isArrayType())
10136 DiagType = 2;
10137
10138 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
10139 return;
10140 }
10141
10142 // std::abs has overloads which prevent most of the absolute value problems
10143 // from occurring.
10144 if (IsStdAbs)
10145 return;
10146
10147 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
10148 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);
10149
10150 // The argument and parameter are the same kind. Check if they are the right
10151 // size.
10152 if (ArgValueKind == ParamValueKind) {
10153 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
10154 return;
10155
10156 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
10157 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
10158 << FDecl << ArgType << ParamType;
10159
10160 if (NewAbsKind == 0)
10161 return;
10162
10163 emitReplacement(*this, Call->getExprLoc(),
10164 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10165 return;
10166 }
10167
10168 // ArgValueKind != ParamValueKind
10169 // The wrong type of absolute value function was used. Attempt to find the
10170 // proper one.
10171 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
10172 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
10173 if (NewAbsKind == 0)
10174 return;
10175
10176 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
10177 << FDecl << ParamValueKind << ArgValueKind;
10178
10179 emitReplacement(*this, Call->getExprLoc(),
10180 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
10181 }
10182
10183 //===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
10184 void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
10185 const FunctionDecl *FDecl) {
10186 if (!Call || !FDecl) return;
10187
10188 // Ignore template specializations and macros.
10189 if (inTemplateInstantiation()) return; 10190 if (Call->getExprLoc().isMacroID()) return; 10191 10192 // Only care about the one template argument, two function parameter std::max 10193 if (Call->getNumArgs() != 2) return; 10194 if (!IsStdFunction(FDecl, "max")) return; 10195 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10196 if (!ArgList) return; 10197 if (ArgList->size() != 1) return; 10198 10199 // Check that template type argument is unsigned integer. 10200 const auto& TA = ArgList->get(0); 10201 if (TA.getKind() != TemplateArgument::Type) return; 10202 QualType ArgType = TA.getAsType(); 10203 if (!ArgType->isUnsignedIntegerType()) return; 10204 10205 // See if either argument is a literal zero. 10206 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10207 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10208 if (!MTE) return false; 10209 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10210 if (!Num) return false; 10211 if (Num->getValue() != 0) return false; 10212 return true; 10213 }; 10214 10215 const Expr *FirstArg = Call->getArg(0); 10216 const Expr *SecondArg = Call->getArg(1); 10217 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10218 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10219 10220 // Only warn when exactly one argument is zero. 10221 if (IsFirstArgZero == IsSecondArgZero) return; 10222 10223 SourceRange FirstRange = FirstArg->getSourceRange(); 10224 SourceRange SecondRange = SecondArg->getSourceRange(); 10225 10226 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10227 10228 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10229 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10230 10231 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10232 SourceRange RemovalRange; 10233 if (IsFirstArgZero) { 10234 RemovalRange = SourceRange(FirstRange.getBegin(), 10235 SecondRange.getBegin().getLocWithOffset(-1)); 10236 } else { 10237 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10238 SecondRange.getEnd()); 10239 } 10240 10241 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10242 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10243 << FixItHint::CreateRemoval(RemovalRange); 10244 } 10245 10246 //===--- CHECK: Standard memory functions ---------------------------------===// 10247 10248 /// Takes the expression passed to the size_t parameter of functions 10249 /// such as memcmp, strncat, etc and warns if it's a comparison. 10250 /// 10251 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 
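/// The intended code there is `if (memcmp(&a, &b, sizeof(a)) > 0)`.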
10252 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10253 IdentifierInfo *FnName, 10254 SourceLocation FnLoc, 10255 SourceLocation RParenLoc) { 10256 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10257 if (!Size) 10258 return false; 10259 10260 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10261 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10262 return false; 10263 10264 SourceRange SizeRange = Size->getSourceRange(); 10265 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10266 << SizeRange << FnName; 10267 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10268 << FnName 10269 << FixItHint::CreateInsertion( 10270 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10271 << FixItHint::CreateRemoval(RParenLoc); 10272 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10273 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10274 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10275 ")"); 10276 10277 return true; 10278 } 10279 10280 /// Determine whether the given type is or contains a dynamic class type 10281 /// (e.g., whether it has a vtable). 10282 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10283 bool &IsContained) { 10284 // Look through array types while ignoring qualifiers. 10285 const Type *Ty = T->getBaseElementTypeUnsafe(); 10286 IsContained = false; 10287 10288 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10289 RD = RD ? RD->getDefinition() : nullptr; 10290 if (!RD || RD->isInvalidDecl()) 10291 return nullptr; 10292 10293 if (RD->isDynamicClass()) 10294 return RD; 10295 10296 // Check all the fields. If any bases were dynamic, the class is dynamic. 10297 // It's impossible for a class to transitively contain itself by value, so 10298 // infinite recursion is impossible. 10299 for (auto *FD : RD->fields()) { 10300 bool SubContained; 10301 if (const CXXRecordDecl *ContainedRD = 10302 getContainedDynamicClass(FD->getType(), SubContained)) { 10303 IsContained = true; 10304 return ContainedRD; 10305 } 10306 } 10307 10308 return nullptr; 10309 } 10310 10311 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 10312 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 10313 if (Unary->getKind() == UETT_SizeOf) 10314 return Unary; 10315 return nullptr; 10316 } 10317 10318 /// If E is a sizeof expression, returns its argument expression, 10319 /// otherwise returns NULL. 10320 static const Expr *getSizeOfExprArg(const Expr *E) { 10321 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10322 if (!SizeOf->isArgumentType()) 10323 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 10324 return nullptr; 10325 } 10326 10327 /// If E is a sizeof expression, returns its argument type. 
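/// For example, for `sizeof(int)` this returns `int`, and for `sizeof x` it
/// returns the type of `x`.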
10328 static QualType getSizeOfArgType(const Expr *E) { 10329 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10330 return SizeOf->getTypeOfArgument(); 10331 return QualType(); 10332 } 10333 10334 namespace { 10335 10336 struct SearchNonTrivialToInitializeField 10337 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 10338 using Super = 10339 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 10340 10341 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 10342 10343 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 10344 SourceLocation SL) { 10345 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10346 asDerived().visitArray(PDIK, AT, SL); 10347 return; 10348 } 10349 10350 Super::visitWithKind(PDIK, FT, SL); 10351 } 10352 10353 void visitARCStrong(QualType FT, SourceLocation SL) { 10354 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10355 } 10356 void visitARCWeak(QualType FT, SourceLocation SL) { 10357 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10358 } 10359 void visitStruct(QualType FT, SourceLocation SL) { 10360 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10361 visit(FD->getType(), FD->getLocation()); 10362 } 10363 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 10364 const ArrayType *AT, SourceLocation SL) { 10365 visit(getContext().getBaseElementType(AT), SL); 10366 } 10367 void visitTrivial(QualType FT, SourceLocation SL) {} 10368 10369 static void diag(QualType RT, const Expr *E, Sema &S) { 10370 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 10371 } 10372 10373 ASTContext &getContext() { return S.getASTContext(); } 10374 10375 const Expr *E; 10376 Sema &S; 10377 }; 10378 10379 struct SearchNonTrivialToCopyField 10380 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 10381 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 10382 10383 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 10384 10385 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 10386 SourceLocation SL) { 10387 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10388 asDerived().visitArray(PCK, AT, SL); 10389 return; 10390 } 10391 10392 Super::visitWithKind(PCK, FT, SL); 10393 } 10394 10395 void visitARCStrong(QualType FT, SourceLocation SL) { 10396 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10397 } 10398 void visitARCWeak(QualType FT, SourceLocation SL) { 10399 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10400 } 10401 void visitStruct(QualType FT, SourceLocation SL) { 10402 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10403 visit(FD->getType(), FD->getLocation()); 10404 } 10405 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 10406 SourceLocation SL) { 10407 visit(getContext().getBaseElementType(AT), SL); 10408 } 10409 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 10410 SourceLocation SL) {} 10411 void visitTrivial(QualType FT, SourceLocation SL) {} 10412 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 10413 10414 static void diag(QualType RT, const Expr *E, Sema &S) { 10415 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 10416 } 10417 10418 ASTContext &getContext() { return S.getASTContext(); } 10419 10420 const Expr *E; 10421 Sema &S; 10422 
}; 10423 10424 } 10425 10426 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 10427 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 10428 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 10429 10430 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 10431 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 10432 return false; 10433 10434 return doesExprLikelyComputeSize(BO->getLHS()) || 10435 doesExprLikelyComputeSize(BO->getRHS()); 10436 } 10437 10438 return getAsSizeOfExpr(SizeofExpr) != nullptr; 10439 } 10440 10441 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 10442 /// 10443 /// \code 10444 /// #define MACRO 0 10445 /// foo(MACRO); 10446 /// foo(0); 10447 /// \endcode 10448 /// 10449 /// This should return true for the first call to foo, but not for the second 10450 /// (regardless of whether foo is a macro or function). 10451 static bool isArgumentExpandedFromMacro(SourceManager &SM, 10452 SourceLocation CallLoc, 10453 SourceLocation ArgLoc) { 10454 if (!CallLoc.isMacroID()) 10455 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 10456 10457 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 10458 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 10459 } 10460 10461 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10462 /// last two arguments transposed. 10463 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10464 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10465 return; 10466 10467 const Expr *SizeArg = 10468 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10469 10470 auto isLiteralZero = [](const Expr *E) { 10471 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10472 }; 10473 10474 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 10475 SourceLocation CallLoc = Call->getRParenLoc(); 10476 SourceManager &SM = S.getSourceManager(); 10477 if (isLiteralZero(SizeArg) && 10478 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10479 10480 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10481 10482 // Some platforms #define bzero to __builtin_memset. See if this is the 10483 // case, and if so, emit a better diagnostic. 10484 if (BId == Builtin::BIbzero || 10485 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10486 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10487 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10488 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10489 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10490 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10491 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10492 } 10493 return; 10494 } 10495 10496 // If the second argument to a memset is a sizeof expression and the third 10497 // isn't, this is also likely an error. This should catch 10498 // 'memset(buf, sizeof(buf), 0xff)'. 10499 if (BId == Builtin::BImemset && 10500 doesExprLikelyComputeSize(Call->getArg(1)) && 10501 !doesExprLikelyComputeSize(Call->getArg(2))) { 10502 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10503 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10504 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10505 return; 10506 } 10507 } 10508 10509 /// Check for dangerous or invalid arguments to memset(). 
10510 /// 10511 /// This issues warnings on known problematic, dangerous or unspecified 10512 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10513 /// function calls. 10514 /// 10515 /// \param Call The call expression to diagnose. 10516 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10517 unsigned BId, 10518 IdentifierInfo *FnName) { 10519 assert(BId != 0); 10520 10521 // It is possible to have a non-standard definition of memset. Validate 10522 // we have enough arguments, and if not, abort further checking. 10523 unsigned ExpectedNumArgs = 10524 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10525 if (Call->getNumArgs() < ExpectedNumArgs) 10526 return; 10527 10528 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10529 BId == Builtin::BIstrndup ? 1 : 2); 10530 unsigned LenArg = 10531 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10532 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10533 10534 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10535 Call->getBeginLoc(), Call->getRParenLoc())) 10536 return; 10537 10538 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10539 CheckMemaccessSize(*this, BId, Call); 10540 10541 // We have special checking when the length is a sizeof expression. 10542 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10543 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10544 llvm::FoldingSetNodeID SizeOfArgID; 10545 10546 // Although widely used, 'bzero' is not a standard function. Be more strict 10547 // with the argument types before allowing diagnostics and only allow the 10548 // form bzero(ptr, sizeof(...)). 10549 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10550 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10551 return; 10552 10553 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10554 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10555 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10556 10557 QualType DestTy = Dest->getType(); 10558 QualType PointeeTy; 10559 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10560 PointeeTy = DestPtrTy->getPointeeType(); 10561 10562 // Never warn about void type pointers. This can be used to suppress 10563 // false positives. 10564 if (PointeeTy->isVoidType()) 10565 continue; 10566 10567 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10568 // actually comparing the expressions for equality. Because computing the 10569 // expression IDs can be expensive, we only do this if the diagnostic is 10570 // enabled. 10571 if (SizeOfArg && 10572 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10573 SizeOfArg->getExprLoc())) { 10574 // We only compute IDs for expressions if the warning is enabled, and 10575 // cache the sizeof arg's ID. 10576 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10577 SizeOfArg->Profile(SizeOfArgID, Context, true); 10578 llvm::FoldingSetNodeID DestID; 10579 Dest->Profile(DestID, Context, true); 10580 if (DestID == SizeOfArgID) { 10581 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10582 // over sizeof(src) as well. 10583 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 
10584 StringRef ReadableName = FnName->getName(); 10585 10586 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10587 if (UnaryOp->getOpcode() == UO_AddrOf) 10588 ActionIdx = 1; // If its an address-of operator, just remove it. 10589 if (!PointeeTy->isIncompleteType() && 10590 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10591 ActionIdx = 2; // If the pointee's size is sizeof(char), 10592 // suggest an explicit length. 10593 10594 // If the function is defined as a builtin macro, do not show macro 10595 // expansion. 10596 SourceLocation SL = SizeOfArg->getExprLoc(); 10597 SourceRange DSR = Dest->getSourceRange(); 10598 SourceRange SSR = SizeOfArg->getSourceRange(); 10599 SourceManager &SM = getSourceManager(); 10600 10601 if (SM.isMacroArgExpansion(SL)) { 10602 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10603 SL = SM.getSpellingLoc(SL); 10604 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10605 SM.getSpellingLoc(DSR.getEnd())); 10606 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10607 SM.getSpellingLoc(SSR.getEnd())); 10608 } 10609 10610 DiagRuntimeBehavior(SL, SizeOfArg, 10611 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10612 << ReadableName 10613 << PointeeTy 10614 << DestTy 10615 << DSR 10616 << SSR); 10617 DiagRuntimeBehavior(SL, SizeOfArg, 10618 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10619 << ActionIdx 10620 << SSR); 10621 10622 break; 10623 } 10624 } 10625 10626 // Also check for cases where the sizeof argument is the exact same 10627 // type as the memory argument, and where it points to a user-defined 10628 // record type. 10629 if (SizeOfArgTy != QualType()) { 10630 if (PointeeTy->isRecordType() && 10631 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10632 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10633 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10634 << FnName << SizeOfArgTy << ArgIdx 10635 << PointeeTy << Dest->getSourceRange() 10636 << LenExpr->getSourceRange()); 10637 break; 10638 } 10639 } 10640 } else if (DestTy->isArrayType()) { 10641 PointeeTy = DestTy; 10642 } 10643 10644 if (PointeeTy == QualType()) 10645 continue; 10646 10647 // Always complain about dynamic classes. 10648 bool IsContained; 10649 if (const CXXRecordDecl *ContainedRD = 10650 getContainedDynamicClass(PointeeTy, IsContained)) { 10651 10652 unsigned OperationType = 0; 10653 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10654 // "overwritten" if we're warning about the destination for any call 10655 // but memcmp; otherwise a verb appropriate to the call. 10656 if (ArgIdx != 0 || IsCmp) { 10657 if (BId == Builtin::BImemcpy) 10658 OperationType = 1; 10659 else if(BId == Builtin::BImemmove) 10660 OperationType = 2; 10661 else if (IsCmp) 10662 OperationType = 3; 10663 } 10664 10665 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10666 PDiag(diag::warn_dyn_class_memaccess) 10667 << (IsCmp ? 
ArgIdx + 2 : ArgIdx) << FnName 10668 << IsContained << ContainedRD << OperationType 10669 << Call->getCallee()->getSourceRange()); 10670 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10671 BId != Builtin::BImemset) 10672 DiagRuntimeBehavior( 10673 Dest->getExprLoc(), Dest, 10674 PDiag(diag::warn_arc_object_memaccess) 10675 << ArgIdx << FnName << PointeeTy 10676 << Call->getCallee()->getSourceRange()); 10677 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10678 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10679 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10680 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10681 PDiag(diag::warn_cstruct_memaccess) 10682 << ArgIdx << FnName << PointeeTy << 0); 10683 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10684 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10685 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10686 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10687 PDiag(diag::warn_cstruct_memaccess) 10688 << ArgIdx << FnName << PointeeTy << 1); 10689 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10690 } else { 10691 continue; 10692 } 10693 } else 10694 continue; 10695 10696 DiagRuntimeBehavior( 10697 Dest->getExprLoc(), Dest, 10698 PDiag(diag::note_bad_memaccess_silence) 10699 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10700 break; 10701 } 10702 } 10703 10704 // A little helper routine: ignore addition and subtraction of integer literals. 10705 // This intentionally does not ignore all integer constant expressions because 10706 // we don't want to remove sizeof(). 10707 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 10708 Ex = Ex->IgnoreParenCasts(); 10709 10710 while (true) { 10711 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 10712 if (!BO || !BO->isAdditiveOp()) 10713 break; 10714 10715 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 10716 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 10717 10718 if (isa<IntegerLiteral>(RHS)) 10719 Ex = LHS; 10720 else if (isa<IntegerLiteral>(LHS)) 10721 Ex = RHS; 10722 else 10723 break; 10724 } 10725 10726 return Ex; 10727 } 10728 10729 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 10730 ASTContext &Context) { 10731 // Only handle constant-sized or VLAs, but not flexible members. 10732 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 10733 // Only issue the FIXIT for arrays of size > 1. 10734 if (CAT->getSize().getSExtValue() <= 1) 10735 return false; 10736 } else if (!Ty->isVariableArrayType()) { 10737 return false; 10738 } 10739 return true; 10740 } 10741 10742 // Warn if the user has made the 'size' argument to strlcpy or strlcat 10743 // be the size of the source, instead of the destination. 
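//
// A sketch of the flagged pattern (hypothetical buffers, not code from this
// file): the size is derived from the source, so the warning fires and, when
// the destination is a constant-size array, the note suggests sizeof(dst):
//
//   char dst[32];
//   strlcpy(dst, src, sizeof(src));   // warned: size taken from the source
//   strlcpy(dst, src, strlen(src));   // warned: size taken from the source
//   strlcpy(dst, src, sizeof(dst));   // suggested replacement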
10744 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 10745 IdentifierInfo *FnName) { 10746 10747 // Don't crash if the user has the wrong number of arguments 10748 unsigned NumArgs = Call->getNumArgs(); 10749 if ((NumArgs != 3) && (NumArgs != 4)) 10750 return; 10751 10752 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 10753 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 10754 const Expr *CompareWithSrc = nullptr; 10755 10756 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 10757 Call->getBeginLoc(), Call->getRParenLoc())) 10758 return; 10759 10760 // Look for 'strlcpy(dst, x, sizeof(x))' 10761 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 10762 CompareWithSrc = Ex; 10763 else { 10764 // Look for 'strlcpy(dst, x, strlen(x))' 10765 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 10766 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 10767 SizeCall->getNumArgs() == 1) 10768 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 10769 } 10770 } 10771 10772 if (!CompareWithSrc) 10773 return; 10774 10775 // Determine if the argument to sizeof/strlen is equal to the source 10776 // argument. In principle there's all kinds of things you could do 10777 // here, for instance creating an == expression and evaluating it with 10778 // EvaluateAsBooleanCondition, but this uses a more direct technique: 10779 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 10780 if (!SrcArgDRE) 10781 return; 10782 10783 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 10784 if (!CompareWithSrcDRE || 10785 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 10786 return; 10787 10788 const Expr *OriginalSizeArg = Call->getArg(2); 10789 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 10790 << OriginalSizeArg->getSourceRange() << FnName; 10791 10792 // Output a FIXIT hint if the destination is an array (rather than a 10793 // pointer to an array). This could be enhanced to handle some 10794 // pointers if we know the actual size, like if DstArg is 'array+2' 10795 // we could say 'sizeof(array)-2'. 10796 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 10797 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 10798 return; 10799 10800 SmallString<128> sizeString; 10801 llvm::raw_svector_ostream OS(sizeString); 10802 OS << "sizeof("; 10803 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10804 OS << ")"; 10805 10806 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 10807 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 10808 OS.str()); 10809 } 10810 10811 /// Check if two expressions refer to the same declaration. 10812 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { 10813 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) 10814 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) 10815 return D1->getDecl() == D2->getDecl(); 10816 return false; 10817 } 10818 10819 static const Expr *getStrlenExprArg(const Expr *E) { 10820 if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { 10821 const FunctionDecl *FD = CE->getDirectCallee(); 10822 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) 10823 return nullptr; 10824 return CE->getArg(0)->IgnoreParenCasts(); 10825 } 10826 return nullptr; 10827 } 10828 10829 // Warn on anti-patterns as the 'size' argument to strncat. 
10830 // The correct size argument should look like the following: 10831 // strncat(dst, src, sizeof(dst) - strlen(dst) - 1); 10832 void Sema::CheckStrncatArguments(const CallExpr *CE, 10833 IdentifierInfo *FnName) { 10834 // Don't crash if the user has the wrong number of arguments. 10835 if (CE->getNumArgs() < 3) 10836 return; 10837 const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); 10838 const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); 10839 const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); 10840 10841 if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(), 10842 CE->getRParenLoc())) 10843 return; 10844 10845 // Identify common expressions, which are wrongly used as the size argument 10846 // to strncat and may lead to buffer overflows. 10847 unsigned PatternType = 0; 10848 if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { 10849 // - sizeof(dst) 10850 if (referToTheSameDecl(SizeOfArg, DstArg)) 10851 PatternType = 1; 10852 // - sizeof(src) 10853 else if (referToTheSameDecl(SizeOfArg, SrcArg)) 10854 PatternType = 2; 10855 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { 10856 if (BE->getOpcode() == BO_Sub) { 10857 const Expr *L = BE->getLHS()->IgnoreParenCasts(); 10858 const Expr *R = BE->getRHS()->IgnoreParenCasts(); 10859 // - sizeof(dst) - strlen(dst) 10860 if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && 10861 referToTheSameDecl(DstArg, getStrlenExprArg(R))) 10862 PatternType = 1; 10863 // - sizeof(src) - (anything) 10864 else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) 10865 PatternType = 2; 10866 } 10867 } 10868 10869 if (PatternType == 0) 10870 return; 10871 10872 // Generate the diagnostic. 10873 SourceLocation SL = LenArg->getBeginLoc(); 10874 SourceRange SR = LenArg->getSourceRange(); 10875 SourceManager &SM = getSourceManager(); 10876 10877 // If the function is defined as a builtin macro, do not show macro expansion. 10878 if (SM.isMacroArgExpansion(SL)) { 10879 SL = SM.getSpellingLoc(SL); 10880 SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), 10881 SM.getSpellingLoc(SR.getEnd())); 10882 } 10883 10884 // Check if the destination is an array (rather than a pointer to an array).
10885 QualType DstTy = DstArg->getType(); 10886 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 10887 Context); 10888 if (!isKnownSizeArray) { 10889 if (PatternType == 1) 10890 Diag(SL, diag::warn_strncat_wrong_size) << SR; 10891 else 10892 Diag(SL, diag::warn_strncat_src_size) << SR; 10893 return; 10894 } 10895 10896 if (PatternType == 1) 10897 Diag(SL, diag::warn_strncat_large_size) << SR; 10898 else 10899 Diag(SL, diag::warn_strncat_src_size) << SR; 10900 10901 SmallString<128> sizeString; 10902 llvm::raw_svector_ostream OS(sizeString); 10903 OS << "sizeof("; 10904 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10905 OS << ") - "; 10906 OS << "strlen("; 10907 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10908 OS << ") - 1"; 10909 10910 Diag(SL, diag::note_strncat_wrong_size) 10911 << FixItHint::CreateReplacement(SR, OS.str()); 10912 } 10913 10914 namespace { 10915 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 10916 const UnaryOperator *UnaryExpr, const Decl *D) { 10917 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 10918 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 10919 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 10920 return; 10921 } 10922 } 10923 10924 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 10925 const UnaryOperator *UnaryExpr) { 10926 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 10927 const Decl *D = Lvalue->getDecl(); 10928 if (isa<DeclaratorDecl>(D)) 10929 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 10930 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 10931 } 10932 10933 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 10934 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 10935 Lvalue->getMemberDecl()); 10936 } 10937 10938 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 10939 const UnaryOperator *UnaryExpr) { 10940 const auto *Lambda = dyn_cast<LambdaExpr>( 10941 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 10942 if (!Lambda) 10943 return; 10944 10945 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 10946 << CalleeName << 2 /*object: lambda expression*/; 10947 } 10948 10949 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 10950 const DeclRefExpr *Lvalue) { 10951 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 10952 if (Var == nullptr) 10953 return; 10954 10955 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 10956 << CalleeName << 0 /*object: */ << Var; 10957 } 10958 10959 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 10960 const CastExpr *Cast) { 10961 SmallString<128> SizeString; 10962 llvm::raw_svector_ostream OS(SizeString); 10963 10964 clang::CastKind Kind = Cast->getCastKind(); 10965 if (Kind == clang::CK_BitCast && 10966 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 10967 return; 10968 if (Kind == clang::CK_IntegralToPointer && 10969 !isa<IntegerLiteral>( 10970 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 10971 return; 10972 10973 switch (Cast->getCastKind()) { 10974 case clang::CK_BitCast: 10975 case clang::CK_IntegralToPointer: 10976 case clang::CK_FunctionToPointerDecay: 10977 OS << '\''; 10978 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 10979 OS << '\''; 10980 break; 10981 default: 10982 return; 10983 } 10984 10985 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
10986 << CalleeName << 0 /*object: */ << OS.str(); 10987 } 10988 } // namespace 10989 10990 /// Alerts the user that they are attempting to free a non-malloc'd object. 10991 void Sema::CheckFreeArguments(const CallExpr *E) { 10992 const std::string CalleeName = 10993 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 10994 10995 { // Prefer something that doesn't involve a cast to make things simpler. 10996 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 10997 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 10998 switch (UnaryExpr->getOpcode()) { 10999 case UnaryOperator::Opcode::UO_AddrOf: 11000 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 11001 case UnaryOperator::Opcode::UO_Plus: 11002 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 11003 default: 11004 break; 11005 } 11006 11007 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 11008 if (Lvalue->getType()->isArrayType()) 11009 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 11010 11011 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 11012 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 11013 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 11014 return; 11015 } 11016 11017 if (isa<BlockExpr>(Arg)) { 11018 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 11019 << CalleeName << 1 /*object: block*/; 11020 return; 11021 } 11022 } 11023 // Maybe the cast was important, check after the other cases. 11024 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 11025 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 11026 } 11027 11028 void 11029 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 11030 SourceLocation ReturnLoc, 11031 bool isObjCMethod, 11032 const AttrVec *Attrs, 11033 const FunctionDecl *FD) { 11034 // Check if the return value is null but should not be. 11035 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 11036 (!isObjCMethod && isNonNullType(Context, lhsType))) && 11037 CheckNonNullExpr(*this, RetValExp)) 11038 Diag(ReturnLoc, diag::warn_null_ret) 11039 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 11040 11041 // C++11 [basic.stc.dynamic.allocation]p4: 11042 // If an allocation function declared with a non-throwing 11043 // exception-specification fails to allocate storage, it shall return 11044 // a null pointer. Any other allocation function that fails to allocate 11045 // storage shall indicate failure only by throwing an exception [...] 11046 if (FD) { 11047 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 11048 if (Op == OO_New || Op == OO_Array_New) { 11049 const FunctionProtoType *Proto 11050 = FD->getType()->castAs<FunctionProtoType>(); 11051 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 11052 CheckNonNullExpr(*this, RetValExp)) 11053 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 11054 << FD << getLangOpts().CPlusPlus11; 11055 } 11056 } 11057 11058 // PPC MMA non-pointer types are not allowed as return type. Checking the type 11059 // here prevent the user from using a PPC MMA type as trailing return type. 11060 if (Context.getTargetInfo().getTriple().isPPC64()) 11061 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 11062 } 11063 11064 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 11065 11066 /// Check for comparisons of floating point operands using != and ==. 
11067 /// Issue a warning if they are not self-comparisons, as they are not likely 11068 /// to do what the programmer intended. 11069 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) { 11070 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); 11071 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); 11072 11073 // Special case: check for x == x (which is OK). 11074 // Do not emit warnings for such cases. 11075 if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) 11076 if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) 11077 if (DRL->getDecl() == DRR->getDecl()) 11078 return; 11079 11080 // Special case: check for comparisons against literals that can be exactly 11081 // represented by APFloat. In such cases, do not emit a warning. This 11082 // is a heuristic: often comparisons against such literals are used to 11083 // detect if a value in a variable has not changed. This clearly can 11084 // lead to false negatives. 11085 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { 11086 if (FLL->isExact()) 11087 return; 11088 } else 11089 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) 11090 if (FLR->isExact()) 11091 return; 11092 11093 // Check for comparisons with builtin function calls. 11094 if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) 11095 if (CL->getBuiltinCallee()) 11096 return; 11097 11098 if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) 11099 if (CR->getBuiltinCallee()) 11100 return; 11101 11102 // Emit the diagnostic. 11103 Diag(Loc, diag::warn_floatingpoint_eq) 11104 << LHS->getSourceRange() << RHS->getSourceRange(); 11105 } 11106 11107 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// 11108 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// 11109 11110 namespace { 11111 11112 /// Structure recording the 'active' range of an integer-valued 11113 /// expression. 11114 struct IntRange { 11115 /// The number of bits active in the int. Note that this includes exactly one 11116 /// sign bit if !NonNegative. 11117 unsigned Width; 11118 11119 /// True if the int is known not to have negative values. If so, all leading 11120 /// bits before Width are known zero, otherwise they are known to be the 11121 /// same as the MSB within Width. 11122 bool NonNegative; 11123 11124 IntRange(unsigned Width, bool NonNegative) 11125 : Width(Width), NonNegative(NonNegative) {} 11126 11127 /// Number of bits excluding the sign bit. 11128 unsigned valueBits() const { 11129 return NonNegative ? Width : Width - 1; 11130 } 11131 11132 /// Returns the range of the bool type. 11133 static IntRange forBoolType() { 11134 return IntRange(1, true); 11135 } 11136 11137 /// Returns the range of an opaque value of the given integral type. 11138 static IntRange forValueOfType(ASTContext &C, QualType T) { 11139 return forValueOfCanonicalType(C, 11140 T->getCanonicalTypeInternal().getTypePtr()); 11141 } 11142 11143 /// Returns the range of an opaque value of a canonical integral type.
11144 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 11145 assert(T->isCanonicalUnqualified()); 11146 11147 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11148 T = VT->getElementType().getTypePtr(); 11149 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11150 T = CT->getElementType().getTypePtr(); 11151 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11152 T = AT->getValueType().getTypePtr(); 11153 11154 if (!C.getLangOpts().CPlusPlus) { 11155 // For enum types in C code, use the underlying datatype. 11156 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11157 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11158 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11159 // For enum types in C++, use the known bit width of the enumerators. 11160 EnumDecl *Enum = ET->getDecl(); 11161 // In C++11, enums can have a fixed underlying type. Use this type to 11162 // compute the range. 11163 if (Enum->isFixed()) { 11164 return IntRange(C.getIntWidth(QualType(T, 0)), 11165 !ET->isSignedIntegerOrEnumerationType()); 11166 } 11167 11168 unsigned NumPositive = Enum->getNumPositiveBits(); 11169 unsigned NumNegative = Enum->getNumNegativeBits(); 11170 11171 if (NumNegative == 0) 11172 return IntRange(NumPositive, true/*NonNegative*/); 11173 else 11174 return IntRange(std::max(NumPositive + 1, NumNegative), 11175 false/*NonNegative*/); 11176 } 11177 11178 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 11179 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11180 11181 const BuiltinType *BT = cast<BuiltinType>(T); 11182 assert(BT->isInteger()); 11183 11184 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11185 } 11186 11187 /// Returns the "target" range of a canonical integral type, i.e. 11188 /// the range of values expressible in the type. 11189 /// 11190 /// This matches forValueOfCanonicalType except that enums have the 11191 /// full range of their type, not the range of their enumerators. 11192 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11193 assert(T->isCanonicalUnqualified()); 11194 11195 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11196 T = VT->getElementType().getTypePtr(); 11197 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11198 T = CT->getElementType().getTypePtr(); 11199 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11200 T = AT->getValueType().getTypePtr(); 11201 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11202 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11203 11204 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 11205 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11206 11207 const BuiltinType *BT = cast<BuiltinType>(T); 11208 assert(BT->isInteger()); 11209 11210 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11211 } 11212 11213 /// Returns the supremum of two ranges: i.e. their conservative merge. 11214 static IntRange join(IntRange L, IntRange R) { 11215 bool Unsigned = L.NonNegative && R.NonNegative; 11216 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11217 L.NonNegative && R.NonNegative); 11218 } 11219 11220 /// Return the range of a bitwise-AND of the two ranges. 
11221 static IntRange bit_and(IntRange L, IntRange R) { 11222 unsigned Bits = std::max(L.Width, R.Width); 11223 bool NonNegative = false; 11224 if (L.NonNegative) { 11225 Bits = std::min(Bits, L.Width); 11226 NonNegative = true; 11227 } 11228 if (R.NonNegative) { 11229 Bits = std::min(Bits, R.Width); 11230 NonNegative = true; 11231 } 11232 return IntRange(Bits, NonNegative); 11233 } 11234 11235 /// Return the range of a sum of the two ranges. 11236 static IntRange sum(IntRange L, IntRange R) { 11237 bool Unsigned = L.NonNegative && R.NonNegative; 11238 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11239 Unsigned); 11240 } 11241 11242 /// Return the range of a difference of the two ranges. 11243 static IntRange difference(IntRange L, IntRange R) { 11244 // We need a 1-bit-wider range if: 11245 // 1) LHS can be negative: least value can be reduced. 11246 // 2) RHS can be negative: greatest value can be increased. 11247 bool CanWiden = !L.NonNegative || !R.NonNegative; 11248 bool Unsigned = L.NonNegative && R.Width == 0; 11249 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11250 !Unsigned, 11251 Unsigned); 11252 } 11253 11254 /// Return the range of a product of the two ranges. 11255 static IntRange product(IntRange L, IntRange R) { 11256 // If both LHS and RHS can be negative, we can form 11257 // -2^L * -2^R = 2^(L + R) 11258 // which requires L + R + 1 value bits to represent. 11259 bool CanWiden = !L.NonNegative && !R.NonNegative; 11260 bool Unsigned = L.NonNegative && R.NonNegative; 11261 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 11262 Unsigned); 11263 } 11264 11265 /// Return the range of a remainder operation between the two ranges. 11266 static IntRange rem(IntRange L, IntRange R) { 11267 // The result of a remainder can't be larger than the result of 11268 // either side. The sign of the result is the sign of the LHS. 11269 bool Unsigned = L.NonNegative; 11270 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 11271 Unsigned); 11272 } 11273 }; 11274 11275 } // namespace 11276 11277 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 11278 unsigned MaxWidth) { 11279 if (value.isSigned() && value.isNegative()) 11280 return IntRange(value.getMinSignedBits(), false); 11281 11282 if (value.getBitWidth() > MaxWidth) 11283 value = value.trunc(MaxWidth); 11284 11285 // isNonNegative() just checks the sign bit without considering 11286 // signedness. 11287 return IntRange(value.getActiveBits(), true); 11288 } 11289 11290 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 11291 unsigned MaxWidth) { 11292 if (result.isInt()) 11293 return GetValueRange(C, result.getInt(), MaxWidth); 11294 11295 if (result.isVector()) { 11296 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 11297 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 11298 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 11299 R = IntRange::join(R, El); 11300 } 11301 return R; 11302 } 11303 11304 if (result.isComplexInt()) { 11305 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 11306 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 11307 return IntRange::join(R, I); 11308 } 11309 11310 // This can happen with lossless casts to intptr_t of "based" lvalues. 11311 // Assume it might use arbitrary bits. 
11312 // FIXME: The only reason we need to pass the type in here is to get 11313 // the sign right on this one case. It would be nice if APValue 11314 // preserved this. 11315 assert(result.isLValue() || result.isAddrLabelDiff()); 11316 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 11317 } 11318 11319 static QualType GetExprType(const Expr *E) { 11320 QualType Ty = E->getType(); 11321 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 11322 Ty = AtomicRHS->getValueType(); 11323 return Ty; 11324 } 11325 11326 /// Pseudo-evaluate the given integer expression, estimating the 11327 /// range of values it might take. 11328 /// 11329 /// \param MaxWidth The width to which the value will be truncated. 11330 /// \param Approximate If \c true, return a likely range for the result: in 11331 /// particular, assume that arithmetic on narrower types doesn't leave 11332 /// those types. If \c false, return a range including all possible 11333 /// result values. 11334 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 11335 bool InConstantContext, bool Approximate) { 11336 E = E->IgnoreParens(); 11337 11338 // Try a full evaluation first. 11339 Expr::EvalResult result; 11340 if (E->EvaluateAsRValue(result, C, InConstantContext)) 11341 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 11342 11343 // I think we only want to look through implicit casts here; if the 11344 // user has an explicit widening cast, we should treat the value as 11345 // being of the new, wider type. 11346 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 11347 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 11348 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 11349 Approximate); 11350 11351 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 11352 11353 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 11354 CE->getCastKind() == CK_BooleanToSignedIntegral; 11355 11356 // Assume that non-integer casts can span the full range of the type. 11357 if (!isIntegerCast) 11358 return OutputTypeRange; 11359 11360 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 11361 std::min(MaxWidth, OutputTypeRange.Width), 11362 InConstantContext, Approximate); 11363 11364 // Bail out if the subexpr's range is as wide as the cast type. 11365 if (SubRange.Width >= OutputTypeRange.Width) 11366 return OutputTypeRange; 11367 11368 // Otherwise, we take the smaller width, and we're non-negative if 11369 // either the output type or the subexpr is. 11370 return IntRange(SubRange.Width, 11371 SubRange.NonNegative || OutputTypeRange.NonNegative); 11372 } 11373 11374 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11375 // If we can fold the condition, just take that operand. 11376 bool CondResult; 11377 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 11378 return GetExprRange(C, 11379 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 11380 MaxWidth, InConstantContext, Approximate); 11381 11382 // Otherwise, conservatively merge. 11383 // GetExprRange requires an integer expression, but a throw expression 11384 // results in a void type. 11385 Expr *E = CO->getTrueExpr(); 11386 IntRange L = E->getType()->isVoidType() 11387 ? IntRange{0, true} 11388 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11389 E = CO->getFalseExpr(); 11390 IntRange R = E->getType()->isVoidType() 11391 ? 
IntRange{0, true} 11392 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11393 return IntRange::join(L, R); 11394 } 11395 11396 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11397 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 11398 11399 switch (BO->getOpcode()) { 11400 case BO_Cmp: 11401 llvm_unreachable("builtin <=> should have class type"); 11402 11403 // Boolean-valued operations are single-bit and positive. 11404 case BO_LAnd: 11405 case BO_LOr: 11406 case BO_LT: 11407 case BO_GT: 11408 case BO_LE: 11409 case BO_GE: 11410 case BO_EQ: 11411 case BO_NE: 11412 return IntRange::forBoolType(); 11413 11414 // The type of the assignments is the type of the LHS, so the RHS 11415 // is not necessarily the same type. 11416 case BO_MulAssign: 11417 case BO_DivAssign: 11418 case BO_RemAssign: 11419 case BO_AddAssign: 11420 case BO_SubAssign: 11421 case BO_XorAssign: 11422 case BO_OrAssign: 11423 // TODO: bitfields? 11424 return IntRange::forValueOfType(C, GetExprType(E)); 11425 11426 // Simple assignments just pass through the RHS, which will have 11427 // been coerced to the LHS type. 11428 case BO_Assign: 11429 // TODO: bitfields? 11430 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11431 Approximate); 11432 11433 // Operations with opaque sources are black-listed. 11434 case BO_PtrMemD: 11435 case BO_PtrMemI: 11436 return IntRange::forValueOfType(C, GetExprType(E)); 11437 11438 // Bitwise-and uses the *infinum* of the two source ranges. 11439 case BO_And: 11440 case BO_AndAssign: 11441 Combine = IntRange::bit_and; 11442 break; 11443 11444 // Left shift gets black-listed based on a judgement call. 11445 case BO_Shl: 11446 // ...except that we want to treat '1 << (blah)' as logically 11447 // positive. It's an important idiom. 11448 if (IntegerLiteral *I 11449 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 11450 if (I->getValue() == 1) { 11451 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 11452 return IntRange(R.Width, /*NonNegative*/ true); 11453 } 11454 } 11455 LLVM_FALLTHROUGH; 11456 11457 case BO_ShlAssign: 11458 return IntRange::forValueOfType(C, GetExprType(E)); 11459 11460 // Right shift by a constant can narrow its left argument. 11461 case BO_Shr: 11462 case BO_ShrAssign: { 11463 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 11464 Approximate); 11465 11466 // If the shift amount is a positive constant, drop the width by 11467 // that much. 11468 if (Optional<llvm::APSInt> shift = 11469 BO->getRHS()->getIntegerConstantExpr(C)) { 11470 if (shift->isNonNegative()) { 11471 unsigned zext = shift->getZExtValue(); 11472 if (zext >= L.Width) 11473 L.Width = (L.NonNegative ? 0 : 1); 11474 else 11475 L.Width -= zext; 11476 } 11477 } 11478 11479 return L; 11480 } 11481 11482 // Comma acts as its right operand. 11483 case BO_Comma: 11484 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11485 Approximate); 11486 11487 case BO_Add: 11488 if (!Approximate) 11489 Combine = IntRange::sum; 11490 break; 11491 11492 case BO_Sub: 11493 if (BO->getLHS()->getType()->isPointerType()) 11494 return IntRange::forValueOfType(C, GetExprType(E)); 11495 if (!Approximate) 11496 Combine = IntRange::difference; 11497 break; 11498 11499 case BO_Mul: 11500 if (!Approximate) 11501 Combine = IntRange::product; 11502 break; 11503 11504 // The width of a division result is mostly determined by the size 11505 // of the LHS. 11506 case BO_Div: { 11507 // Don't 'pre-truncate' the operands. 
11508 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11509 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11510 Approximate); 11511 11512 // If the divisor is constant, use that. 11513 if (Optional<llvm::APSInt> divisor = 11514 BO->getRHS()->getIntegerConstantExpr(C)) { 11515 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11516 if (log2 >= L.Width) 11517 L.Width = (L.NonNegative ? 0 : 1); 11518 else 11519 L.Width = std::min(L.Width - log2, MaxWidth); 11520 return L; 11521 } 11522 11523 // Otherwise, just use the LHS's width. 11524 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11525 // could be -1. 11526 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11527 Approximate); 11528 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11529 } 11530 11531 case BO_Rem: 11532 Combine = IntRange::rem; 11533 break; 11534 11535 // The default behavior is okay for these. 11536 case BO_Xor: 11537 case BO_Or: 11538 break; 11539 } 11540 11541 // Combine the two ranges, but limit the result to the type in which we 11542 // performed the computation. 11543 QualType T = GetExprType(E); 11544 unsigned opWidth = C.getIntWidth(T); 11545 IntRange L = 11546 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11547 IntRange R = 11548 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11549 IntRange C = Combine(L, R); 11550 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11551 C.Width = std::min(C.Width, MaxWidth); 11552 return C; 11553 } 11554 11555 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11556 switch (UO->getOpcode()) { 11557 // Boolean-valued operations are white-listed. 11558 case UO_LNot: 11559 return IntRange::forBoolType(); 11560 11561 // Operations with opaque sources are black-listed. 11562 case UO_Deref: 11563 case UO_AddrOf: // should be impossible 11564 return IntRange::forValueOfType(C, GetExprType(E)); 11565 11566 default: 11567 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11568 Approximate); 11569 } 11570 } 11571 11572 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11573 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11574 Approximate); 11575 11576 if (const auto *BitField = E->getSourceBitField()) 11577 return IntRange(BitField->getBitWidthValue(C), 11578 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11579 11580 return IntRange::forValueOfType(C, GetExprType(E)); 11581 } 11582 11583 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11584 bool InConstantContext, bool Approximate) { 11585 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11586 Approximate); 11587 } 11588 11589 /// Checks whether the given value, which currently has the given 11590 /// source semantics, has the same value when coerced through the 11591 /// target semantics. 11592 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11593 const llvm::fltSemantics &Src, 11594 const llvm::fltSemantics &Tgt) { 11595 llvm::APFloat truncated = value; 11596 11597 bool ignored; 11598 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11599 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11600 11601 return truncated.bitwiseIsEqual(value); 11602 } 11603 11604 /// Checks whether the given value, which currently has the given 11605 /// source semantics, has the same value when coerced through the 11606 /// target semantics. 
11607 /// 11608 /// The value might be a vector of floats (or a complex number). 11609 static bool IsSameFloatAfterCast(const APValue &value, 11610 const llvm::fltSemantics &Src, 11611 const llvm::fltSemantics &Tgt) { 11612 if (value.isFloat()) 11613 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11614 11615 if (value.isVector()) { 11616 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11617 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11618 return false; 11619 return true; 11620 } 11621 11622 assert(value.isComplexFloat()); 11623 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11624 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11625 } 11626 11627 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11628 bool IsListInit = false); 11629 11630 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11631 // Suppress cases where we are comparing against an enum constant. 11632 if (const DeclRefExpr *DR = 11633 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11634 if (isa<EnumConstantDecl>(DR->getDecl())) 11635 return true; 11636 11637 // Suppress cases where the value is expanded from a macro, unless that macro 11638 // is how a language represents a boolean literal. This is the case in both C 11639 // and Objective-C. 11640 SourceLocation BeginLoc = E->getBeginLoc(); 11641 if (BeginLoc.isMacroID()) { 11642 StringRef MacroName = Lexer::getImmediateMacroName( 11643 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11644 return MacroName != "YES" && MacroName != "NO" && 11645 MacroName != "true" && MacroName != "false"; 11646 } 11647 11648 return false; 11649 } 11650 11651 static bool isKnownToHaveUnsignedValue(Expr *E) { 11652 return E->getType()->isIntegerType() && 11653 (!E->getType()->isSignedIntegerType() || 11654 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11655 } 11656 11657 namespace { 11658 /// The promoted range of values of a type. In general this has the 11659 /// following structure: 11660 /// 11661 /// |-----------| . . . |-----------| 11662 /// ^ ^ ^ ^ 11663 /// Min HoleMin HoleMax Max 11664 /// 11665 /// ... where there is only a hole if a signed type is promoted to unsigned 11666 /// (in which case Min and Max are the smallest and largest representable 11667 /// values). 11668 struct PromotedRange { 11669 // Min, or HoleMax if there is a hole. 11670 llvm::APSInt PromotedMin; 11671 // Max, or HoleMin if there is a hole. 11672 llvm::APSInt PromotedMax; 11673 11674 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11675 if (R.Width == 0) 11676 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11677 else if (R.Width >= BitWidth && !Unsigned) { 11678 // Promotion made the type *narrower*. This happens when promoting 11679 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11680 // Treat all values of 'signed int' as being in range for now. 11681 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11682 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11683 } else { 11684 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11685 .extOrTrunc(BitWidth); 11686 PromotedMin.setIsUnsigned(Unsigned); 11687 11688 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11689 .extOrTrunc(BitWidth); 11690 PromotedMax.setIsUnsigned(Unsigned); 11691 } 11692 } 11693 11694 // Determine whether this range is contiguous (has no hole). 
11695 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11696 11697 // Where a constant value is within the range. 11698 enum ComparisonResult { 11699 LT = 0x1, 11700 LE = 0x2, 11701 GT = 0x4, 11702 GE = 0x8, 11703 EQ = 0x10, 11704 NE = 0x20, 11705 InRangeFlag = 0x40, 11706 11707 Less = LE | LT | NE, 11708 Min = LE | InRangeFlag, 11709 InRange = InRangeFlag, 11710 Max = GE | InRangeFlag, 11711 Greater = GE | GT | NE, 11712 11713 OnlyValue = LE | GE | EQ | InRangeFlag, 11714 InHole = NE 11715 }; 11716 11717 ComparisonResult compare(const llvm::APSInt &Value) const { 11718 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 11719 Value.isUnsigned() == PromotedMin.isUnsigned()); 11720 if (!isContiguous()) { 11721 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 11722 if (Value.isMinValue()) return Min; 11723 if (Value.isMaxValue()) return Max; 11724 if (Value >= PromotedMin) return InRange; 11725 if (Value <= PromotedMax) return InRange; 11726 return InHole; 11727 } 11728 11729 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 11730 case -1: return Less; 11731 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 11732 case 1: 11733 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 11734 case -1: return InRange; 11735 case 0: return Max; 11736 case 1: return Greater; 11737 } 11738 } 11739 11740 llvm_unreachable("impossible compare result"); 11741 } 11742 11743 static llvm::Optional<StringRef> 11744 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 11745 if (Op == BO_Cmp) { 11746 ComparisonResult LTFlag = LT, GTFlag = GT; 11747 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 11748 11749 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 11750 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 11751 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 11752 return llvm::None; 11753 } 11754 11755 ComparisonResult TrueFlag, FalseFlag; 11756 if (Op == BO_EQ) { 11757 TrueFlag = EQ; 11758 FalseFlag = NE; 11759 } else if (Op == BO_NE) { 11760 TrueFlag = NE; 11761 FalseFlag = EQ; 11762 } else { 11763 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 11764 TrueFlag = LT; 11765 FalseFlag = GE; 11766 } else { 11767 TrueFlag = GT; 11768 FalseFlag = LE; 11769 } 11770 if (Op == BO_GE || Op == BO_LE) 11771 std::swap(TrueFlag, FalseFlag); 11772 } 11773 if (R & TrueFlag) 11774 return StringRef("true"); 11775 if (R & FalseFlag) 11776 return StringRef("false"); 11777 return llvm::None; 11778 } 11779 }; 11780 } 11781 11782 static bool HasEnumType(Expr *E) { 11783 // Strip off implicit integral promotions. 11784 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11785 if (ICE->getCastKind() != CK_IntegralCast && 11786 ICE->getCastKind() != CK_NoOp) 11787 break; 11788 E = ICE->getSubExpr(); 11789 } 11790 11791 return E->getType()->isEnumeralType(); 11792 } 11793 11794 static int classifyConstantValue(Expr *Constant) { 11795 // The values of this enumeration are used in the diagnostics 11796 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 11797 enum ConstantValueKind { 11798 Miscellaneous = 0, 11799 LiteralTrue, 11800 LiteralFalse 11801 }; 11802 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 11803 return BL->getValue() ? 
ConstantValueKind::LiteralTrue 11804 : ConstantValueKind::LiteralFalse; 11805 return ConstantValueKind::Miscellaneous; 11806 } 11807 11808 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E, 11809 Expr *Constant, Expr *Other, 11810 const llvm::APSInt &Value, 11811 bool RhsConstant) { 11812 if (S.inTemplateInstantiation()) 11813 return false; 11814 11815 Expr *OriginalOther = Other; 11816 11817 Constant = Constant->IgnoreParenImpCasts(); 11818 Other = Other->IgnoreParenImpCasts(); 11819 11820 // Suppress warnings on tautological comparisons between values of the same 11821 // enumeration type. There are only two ways we could warn on this: 11822 // - If the constant is outside the range of representable values of 11823 // the enumeration. In such a case, we should warn about the cast 11824 // to enumeration type, not about the comparison. 11825 // - If the constant is the maximum / minimum in-range value. For an 11826 // enumeration type, such comparisons can be meaningful and useful. 11827 if (Constant->getType()->isEnumeralType() && 11828 S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType())) 11829 return false; 11830 11831 IntRange OtherValueRange = GetExprRange( 11832 S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false); 11833 11834 QualType OtherT = Other->getType(); 11835 if (const auto *AT = OtherT->getAs<AtomicType>()) 11836 OtherT = AT->getValueType(); 11837 IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT); 11838 11839 // Special case for ObjC BOOL on targets where it's a typedef for a signed char 11840 // (namely, macOS). FIXME: IntRange::forValueOfType should do this. 11841 bool IsObjCSignedCharBool = S.getLangOpts().ObjC && 11842 S.NSAPIObj->isObjCBOOLType(OtherT) && 11843 OtherT->isSpecificBuiltinType(BuiltinType::SChar); 11844 11845 // Whether we're treating Other as being a bool because of the form of 11846 // expression despite it having another type (typically 'int' in C). 11847 bool OtherIsBooleanDespiteType = 11848 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue(); 11849 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool) 11850 OtherTypeRange = OtherValueRange = IntRange::forBoolType(); 11851 11852 // Check if all values in the range of possible values of this expression 11853 // lead to the same comparison outcome. 11854 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(), 11855 Value.isUnsigned()); 11856 auto Cmp = OtherPromotedValueRange.compare(Value); 11857 auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant); 11858 if (!Result) 11859 return false; 11860 11861 // Also consider the range determined by the type alone. This allows us to 11862 // classify the warning under the proper diagnostic group. 11863 bool TautologicalTypeCompare = false; 11864 { 11865 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(), 11866 Value.isUnsigned()); 11867 auto TypeCmp = OtherPromotedTypeRange.compare(Value); 11868 if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp, 11869 RhsConstant)) { 11870 TautologicalTypeCompare = true; 11871 Cmp = TypeCmp; 11872 Result = TypeResult; 11873 } 11874 } 11875 11876 // Don't warn if the non-constant operand actually always evaluates to the 11877 // same value. 11878 if (!TautologicalTypeCompare && OtherValueRange.Width == 0) 11879 return false; 11880 11881 // Suppress the diagnostic for an in-range comparison if the constant comes 11882 // from a macro or enumerator.
We don't want to diagnose 11883 // 11884 // some_long_value <= INT_MAX 11885 // 11886 // when sizeof(int) == sizeof(long). 11887 bool InRange = Cmp & PromotedRange::InRangeFlag; 11888 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 11889 return false; 11890 11891 // A comparison of an unsigned bit-field against 0 is really a type problem, 11892 // even though at the type level the bit-field might promote to 'signed int'. 11893 if (Other->refersToBitField() && InRange && Value == 0 && 11894 Other->getType()->isUnsignedIntegerOrEnumerationType()) 11895 TautologicalTypeCompare = true; 11896 11897 // If this is a comparison to an enum constant, include that 11898 // constant in the diagnostic. 11899 const EnumConstantDecl *ED = nullptr; 11900 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 11901 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 11902 11903 // Should be enough for uint128 (39 decimal digits) 11904 SmallString<64> PrettySourceValue; 11905 llvm::raw_svector_ostream OS(PrettySourceValue); 11906 if (ED) { 11907 OS << '\'' << *ED << "' (" << Value << ")"; 11908 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 11909 Constant->IgnoreParenImpCasts())) { 11910 OS << (BL->getValue() ? "YES" : "NO"); 11911 } else { 11912 OS << Value; 11913 } 11914 11915 if (!TautologicalTypeCompare) { 11916 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 11917 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 11918 << E->getOpcodeStr() << OS.str() << *Result 11919 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11920 return true; 11921 } 11922 11923 if (IsObjCSignedCharBool) { 11924 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11925 S.PDiag(diag::warn_tautological_compare_objc_bool) 11926 << OS.str() << *Result); 11927 return true; 11928 } 11929 11930 // FIXME: We use a somewhat different formatting for the in-range cases and 11931 // cases involving boolean values for historical reasons. We should pick a 11932 // consistent way of presenting these diagnostics. 11933 if (!InRange || Other->isKnownToHaveBooleanValue()) { 11934 11935 S.DiagRuntimeBehavior( 11936 E->getOperatorLoc(), E, 11937 S.PDiag(!InRange ? diag::warn_out_of_range_compare 11938 : diag::warn_tautological_bool_compare) 11939 << OS.str() << classifyConstantValue(Constant) << OtherT 11940 << OtherIsBooleanDespiteType << *Result 11941 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 11942 } else { 11943 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 11944 unsigned Diag = 11945 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 11946 ? (HasEnumType(OriginalOther) 11947 ? diag::warn_unsigned_enum_always_true_comparison 11948 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 11949 : diag::warn_unsigned_always_true_comparison) 11950 : diag::warn_tautological_constant_compare; 11951 11952 S.Diag(E->getOperatorLoc(), Diag) 11953 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 11954 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11955 } 11956 11957 return true; 11958 } 11959 11960 /// Analyze the operands of the given comparison. Implements the 11961 /// fallback case from AnalyzeComparison. 11962 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 11963 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11964 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11965 } 11966 11967 /// Implements -Wsign-compare. 
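/// An illustrative sketch (hypothetical code, not from this file):
///   void f(int i, unsigned u) {
///     if (i < u) {}   // may warn: comparison of integers of different signs
///   }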
11968 /// 11969 /// \param E the binary operator to check for warnings 11970 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 11971 // The type the comparison is being performed in. 11972 QualType T = E->getLHS()->getType(); 11973 11974 // Only analyze comparison operators where both sides have been converted to 11975 // the same type. 11976 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 11977 return AnalyzeImpConvsInComparison(S, E); 11978 11979 // Don't analyze value-dependent comparisons directly. 11980 if (E->isValueDependent()) 11981 return AnalyzeImpConvsInComparison(S, E); 11982 11983 Expr *LHS = E->getLHS(); 11984 Expr *RHS = E->getRHS(); 11985 11986 if (T->isIntegralType(S.Context)) { 11987 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 11988 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 11989 11990 // We don't care about expressions whose result is a constant. 11991 if (RHSValue && LHSValue) 11992 return AnalyzeImpConvsInComparison(S, E); 11993 11994 // We only care about expressions where just one side is literal 11995 if ((bool)RHSValue ^ (bool)LHSValue) { 11996 // Is the constant on the RHS or LHS? 11997 const bool RhsConstant = (bool)RHSValue; 11998 Expr *Const = RhsConstant ? RHS : LHS; 11999 Expr *Other = RhsConstant ? LHS : RHS; 12000 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 12001 12002 // Check whether an integer constant comparison results in a value 12003 // of 'true' or 'false'. 12004 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 12005 return AnalyzeImpConvsInComparison(S, E); 12006 } 12007 } 12008 12009 if (!T->hasUnsignedIntegerRepresentation()) { 12010 // We don't do anything special if this isn't an unsigned integral 12011 // comparison: we're only interested in integral comparisons, and 12012 // signed comparisons only happen in cases we don't care to warn about. 12013 return AnalyzeImpConvsInComparison(S, E); 12014 } 12015 12016 LHS = LHS->IgnoreParenImpCasts(); 12017 RHS = RHS->IgnoreParenImpCasts(); 12018 12019 if (!S.getLangOpts().CPlusPlus) { 12020 // Avoid warning about comparison of integers with different signs when 12021 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 12022 // the type of `E`. 12023 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 12024 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12025 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 12026 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 12027 } 12028 12029 // Check to see if one of the (unmodified) operands is of different 12030 // signedness. 12031 Expr *signedOperand, *unsignedOperand; 12032 if (LHS->getType()->hasSignedIntegerRepresentation()) { 12033 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 12034 "unsigned comparison between two signed integer expressions?"); 12035 signedOperand = LHS; 12036 unsignedOperand = RHS; 12037 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 12038 signedOperand = RHS; 12039 unsignedOperand = LHS; 12040 } else { 12041 return AnalyzeImpConvsInComparison(S, E); 12042 } 12043 12044 // Otherwise, calculate the effective range of the signed operand. 12045 IntRange signedRange = GetExprRange( 12046 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 12047 12048 // Go ahead and analyze implicit conversions in the operands. Note 12049 // that we skip the implicit conversions on both sides. 
12050 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 12051 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 12052 12053 // If the signed range is non-negative, -Wsign-compare won't fire. 12054 if (signedRange.NonNegative) 12055 return; 12056 12057 // For (in)equality comparisons, if the unsigned operand is a 12058 // constant which cannot collide with a overflowed signed operand, 12059 // then reinterpreting the signed operand as unsigned will not 12060 // change the result of the comparison. 12061 if (E->isEqualityOp()) { 12062 unsigned comparisonWidth = S.Context.getIntWidth(T); 12063 IntRange unsignedRange = 12064 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 12065 /*Approximate*/ true); 12066 12067 // We should never be unable to prove that the unsigned operand is 12068 // non-negative. 12069 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 12070 12071 if (unsignedRange.Width < comparisonWidth) 12072 return; 12073 } 12074 12075 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 12076 S.PDiag(diag::warn_mixed_sign_comparison) 12077 << LHS->getType() << RHS->getType() 12078 << LHS->getSourceRange() << RHS->getSourceRange()); 12079 } 12080 12081 /// Analyzes an attempt to assign the given value to a bitfield. 12082 /// 12083 /// Returns true if there was something fishy about the attempt. 12084 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 12085 SourceLocation InitLoc) { 12086 assert(Bitfield->isBitField()); 12087 if (Bitfield->isInvalidDecl()) 12088 return false; 12089 12090 // White-list bool bitfields. 12091 QualType BitfieldType = Bitfield->getType(); 12092 if (BitfieldType->isBooleanType()) 12093 return false; 12094 12095 if (BitfieldType->isEnumeralType()) { 12096 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 12097 // If the underlying enum type was not explicitly specified as an unsigned 12098 // type and the enum contain only positive values, MSVC++ will cause an 12099 // inconsistency by storing this as a signed type. 12100 if (S.getLangOpts().CPlusPlus11 && 12101 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 12102 BitfieldEnumDecl->getNumPositiveBits() > 0 && 12103 BitfieldEnumDecl->getNumNegativeBits() == 0) { 12104 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 12105 << BitfieldEnumDecl; 12106 } 12107 } 12108 12109 if (Bitfield->getType()->isBooleanType()) 12110 return false; 12111 12112 // Ignore value- or type-dependent expressions. 12113 if (Bitfield->getBitWidth()->isValueDependent() || 12114 Bitfield->getBitWidth()->isTypeDependent() || 12115 Init->isValueDependent() || 12116 Init->isTypeDependent()) 12117 return false; 12118 12119 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 12120 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 12121 12122 Expr::EvalResult Result; 12123 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 12124 Expr::SE_AllowSideEffects)) { 12125 // The RHS is not constant. If the RHS has an enum type, make sure the 12126 // bitfield is wide enough to hold all the values of the enum without 12127 // truncation. 12128 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 12129 EnumDecl *ED = EnumTy->getDecl(); 12130 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 12131 12132 // Enum types are implicitly signed on Windows, so check if there are any 12133 // negative enumerators to see if the enum was intended to be signed or 12134 // not. 
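    // Illustrative sketch of what the signedness/width checks below catch
    // (hypothetical names; the sign case assumes Microsoft-ABI enum behaviour):
    //   enum Color { Red, Green, Blue, Cyan, Magenta };   // needs 3 value bits
    //   struct S { Color c : 2; };
    //   void set(S &s, Color v) { s.c = v; }   // bit-field too small for 'Color'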
12135 bool SignedEnum = ED->getNumNegativeBits() > 0; 12136 12137 // Check for surprising sign changes when assigning enum values to a 12138 // bitfield of different signedness. If the bitfield is signed and we 12139 // have exactly the right number of bits to store this unsigned enum, 12140 // suggest changing the enum to an unsigned type. This typically happens 12141 // on Windows where unfixed enums always use an underlying type of 'int'. 12142 unsigned DiagID = 0; 12143 if (SignedEnum && !SignedBitfield) { 12144 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 12145 } else if (SignedBitfield && !SignedEnum && 12146 ED->getNumPositiveBits() == FieldWidth) { 12147 DiagID = diag::warn_signed_bitfield_enum_conversion; 12148 } 12149 12150 if (DiagID) { 12151 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12152 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12153 SourceRange TypeRange = 12154 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12155 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12156 << SignedEnum << TypeRange; 12157 } 12158 12159 // Compute the required bitwidth. If the enum has negative values, we need 12160 // one more bit than the normal number of positive bits to represent the 12161 // sign bit. 12162 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12163 ED->getNumNegativeBits()) 12164 : ED->getNumPositiveBits(); 12165 12166 // Check the bitwidth. 12167 if (BitsNeeded > FieldWidth) { 12168 Expr *WidthExpr = Bitfield->getBitWidth(); 12169 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12170 << Bitfield << ED; 12171 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12172 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12173 } 12174 } 12175 12176 return false; 12177 } 12178 12179 llvm::APSInt Value = Result.Val.getInt(); 12180 12181 unsigned OriginalWidth = Value.getBitWidth(); 12182 12183 if (!Value.isSigned() || Value.isNegative()) 12184 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12185 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12186 OriginalWidth = Value.getMinSignedBits(); 12187 12188 if (OriginalWidth <= FieldWidth) 12189 return false; 12190 12191 // Compute the value which the bitfield will contain. 12192 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12193 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12194 12195 // Check whether the stored value is equal to the original value. 12196 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12197 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12198 return false; 12199 12200 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12201 // therefore don't strictly fit into a signed bitfield of width 1. 12202 if (FieldWidth == 1 && Value == 1) 12203 return false; 12204 12205 std::string PrettyValue = toString(Value, 10); 12206 std::string PrettyTrunc = toString(TruncatedValue, 10); 12207 12208 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12209 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12210 << Init->getSourceRange(); 12211 12212 return true; 12213 } 12214 12215 /// Analyze the given simple or compound assignment for warning-worthy 12216 /// operations. 12217 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12218 // Just recurse on the LHS. 
12219 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12220 12221 // We want to recurse on the RHS as normal unless we're assigning to 12222 // a bitfield. 12223 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12224 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12225 E->getOperatorLoc())) { 12226 // Recurse, ignoring any implicit conversions on the RHS. 12227 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12228 E->getOperatorLoc()); 12229 } 12230 } 12231 12232 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12233 12234 // Diagnose implicitly sequentially-consistent atomic assignment. 12235 if (E->getLHS()->getType()->isAtomicType()) 12236 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12237 } 12238 12239 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12240 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12241 SourceLocation CContext, unsigned diag, 12242 bool pruneControlFlow = false) { 12243 if (pruneControlFlow) { 12244 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12245 S.PDiag(diag) 12246 << SourceType << T << E->getSourceRange() 12247 << SourceRange(CContext)); 12248 return; 12249 } 12250 S.Diag(E->getExprLoc(), diag) 12251 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12252 } 12253 12254 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12255 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12256 SourceLocation CContext, 12257 unsigned diag, bool pruneControlFlow = false) { 12258 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12259 } 12260 12261 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12262 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12263 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12264 } 12265 12266 static void adornObjCBoolConversionDiagWithTernaryFixit( 12267 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12268 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12269 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12270 Ignored = OVE->getSourceExpr(); 12271 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12272 isa<BinaryOperator>(Ignored) || 12273 isa<CXXOperatorCallExpr>(Ignored); 12274 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12275 if (NeedsParens) 12276 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12277 << FixItHint::CreateInsertion(EndLoc, ")"); 12278 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12279 } 12280 12281 /// Diagnose an implicit cast from a floating point value to an integer value. 
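/// A hedged illustration (the exact diagnostic depends on whether the value
/// is a literal, is representable exactly, is out of range, etc.):
///   int i = 3.14;   // e.g. the stored value changes from 3.14 to 3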
12282 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 12283 SourceLocation CContext) { 12284 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 12285 const bool PruneWarnings = S.inTemplateInstantiation(); 12286 12287 Expr *InnerE = E->IgnoreParenImpCasts(); 12288 // We also want to warn on, e.g., "int i = -1.234" 12289 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 12290 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 12291 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 12292 12293 const bool IsLiteral = 12294 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 12295 12296 llvm::APFloat Value(0.0); 12297 bool IsConstant = 12298 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 12299 if (!IsConstant) { 12300 if (isObjCSignedCharBool(S, T)) { 12301 return adornObjCBoolConversionDiagWithTernaryFixit( 12302 S, E, 12303 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 12304 << E->getType()); 12305 } 12306 12307 return DiagnoseImpCast(S, E, T, CContext, 12308 diag::warn_impcast_float_integer, PruneWarnings); 12309 } 12310 12311 bool isExact = false; 12312 12313 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 12314 T->hasUnsignedIntegerRepresentation()); 12315 llvm::APFloat::opStatus Result = Value.convertToInteger( 12316 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 12317 12318 // FIXME: Force the precision of the source value down so we don't print 12319 // digits which are usually useless (we don't really care here if we 12320 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 12321 // would automatically print the shortest representation, but it's a bit 12322 // tricky to implement. 12323 SmallString<16> PrettySourceValue; 12324 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 12325 precision = (precision * 59 + 195) / 196; 12326 Value.toString(PrettySourceValue, precision); 12327 12328 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 12329 return adornObjCBoolConversionDiagWithTernaryFixit( 12330 S, E, 12331 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 12332 << PrettySourceValue); 12333 } 12334 12335 if (Result == llvm::APFloat::opOK && isExact) { 12336 if (IsLiteral) return; 12337 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 12338 PruneWarnings); 12339 } 12340 12341 // Conversion of a floating-point value to a non-bool integer where the 12342 // integral part cannot be represented by the integer type is undefined. 12343 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 12344 return DiagnoseImpCast( 12345 S, E, T, CContext, 12346 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 12347 : diag::warn_impcast_float_to_integer_out_of_range, 12348 PruneWarnings); 12349 12350 unsigned DiagID = 0; 12351 if (IsLiteral) { 12352 // Warn on floating point literal to integer. 12353 DiagID = diag::warn_impcast_literal_float_to_integer; 12354 } else if (IntegerValue == 0) { 12355 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 12356 return DiagnoseImpCast(S, E, T, CContext, 12357 diag::warn_impcast_float_integer, PruneWarnings); 12358 } 12359 // Warn on non-zero to zero conversion. 
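    // e.g. (hypothetical) 'int i = 1.0 / 3.0;' - the evaluated double is
    // non-zero but its integral part is 0, so the stored value becomes 0.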
12360 DiagID = diag::warn_impcast_float_to_integer_zero; 12361 } else { 12362 if (IntegerValue.isUnsigned()) { 12363 if (!IntegerValue.isMaxValue()) { 12364 return DiagnoseImpCast(S, E, T, CContext, 12365 diag::warn_impcast_float_integer, PruneWarnings); 12366 } 12367 } else { // IntegerValue.isSigned() 12368 if (!IntegerValue.isMaxSignedValue() && 12369 !IntegerValue.isMinSignedValue()) { 12370 return DiagnoseImpCast(S, E, T, CContext, 12371 diag::warn_impcast_float_integer, PruneWarnings); 12372 } 12373 } 12374 // Warn on evaluatable floating point expression to integer conversion. 12375 DiagID = diag::warn_impcast_float_to_integer; 12376 } 12377 12378 SmallString<16> PrettyTargetValue; 12379 if (IsBool) 12380 PrettyTargetValue = Value.isZero() ? "false" : "true"; 12381 else 12382 IntegerValue.toString(PrettyTargetValue); 12383 12384 if (PruneWarnings) { 12385 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12386 S.PDiag(DiagID) 12387 << E->getType() << T.getUnqualifiedType() 12388 << PrettySourceValue << PrettyTargetValue 12389 << E->getSourceRange() << SourceRange(CContext)); 12390 } else { 12391 S.Diag(E->getExprLoc(), DiagID) 12392 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 12393 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 12394 } 12395 } 12396 12397 /// Analyze the given compound assignment for the possible losing of 12398 /// floating-point precision. 12399 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 12400 assert(isa<CompoundAssignOperator>(E) && 12401 "Must be compound assignment operation"); 12402 // Recurse on the LHS and RHS in here 12403 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12404 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12405 12406 if (E->getLHS()->getType()->isAtomicType()) 12407 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 12408 12409 // Now check the outermost expression 12410 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 12411 const auto *RBT = cast<CompoundAssignOperator>(E) 12412 ->getComputationResultType() 12413 ->getAs<BuiltinType>(); 12414 12415 // The below checks assume source is floating point. 12416 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 12417 12418 // If source is floating point but target is an integer. 12419 if (ResultBT->isInteger()) 12420 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 12421 E->getExprLoc(), diag::warn_impcast_float_integer); 12422 12423 if (!ResultBT->isFloatingPoint()) 12424 return; 12425 12426 // If both source and target are floating points, warn about losing precision. 12427 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12428 QualType(ResultBT, 0), QualType(RBT, 0)); 12429 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 12430 // warn about dropping FP rank. 
12431 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 12432 diag::warn_impcast_float_result_precision); 12433 } 12434 12435 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 12436 IntRange Range) { 12437 if (!Range.Width) return "0"; 12438 12439 llvm::APSInt ValueInRange = Value; 12440 ValueInRange.setIsSigned(!Range.NonNegative); 12441 ValueInRange = ValueInRange.trunc(Range.Width); 12442 return toString(ValueInRange, 10); 12443 } 12444 12445 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 12446 if (!isa<ImplicitCastExpr>(Ex)) 12447 return false; 12448 12449 Expr *InnerE = Ex->IgnoreParenImpCasts(); 12450 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 12451 const Type *Source = 12452 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 12453 if (Target->isDependentType()) 12454 return false; 12455 12456 const BuiltinType *FloatCandidateBT = 12457 dyn_cast<BuiltinType>(ToBool ? Source : Target); 12458 const Type *BoolCandidateType = ToBool ? Target : Source; 12459 12460 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 12461 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 12462 } 12463 12464 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12465 SourceLocation CC) { 12466 unsigned NumArgs = TheCall->getNumArgs(); 12467 for (unsigned i = 0; i < NumArgs; ++i) { 12468 Expr *CurrA = TheCall->getArg(i); 12469 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12470 continue; 12471 12472 bool IsSwapped = ((i > 0) && 12473 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12474 IsSwapped |= ((i < (NumArgs - 1)) && 12475 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12476 if (IsSwapped) { 12477 // Warn on this floating-point to bool conversion. 12478 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12479 CurrA->getType(), CC, 12480 diag::warn_impcast_floating_point_to_bool); 12481 } 12482 } 12483 } 12484 12485 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12486 SourceLocation CC) { 12487 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12488 E->getExprLoc())) 12489 return; 12490 12491 // Don't warn on functions which have return type nullptr_t. 12492 if (isa<CallExpr>(E)) 12493 return; 12494 12495 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12496 const Expr::NullPointerConstantKind NullKind = 12497 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12498 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12499 return; 12500 12501 // Return if target type is a safe conversion. 12502 if (T->isAnyPointerType() || T->isBlockPointerType() || 12503 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12504 return; 12505 12506 SourceLocation Loc = E->getSourceRange().getBegin(); 12507 12508 // Venture through the macro stacks to get to the source of macro arguments. 12509 // The new location is a better location than the complete location that was 12510 // passed in. 12511 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12512 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12513 12514 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
12515 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12516 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12517 Loc, S.SourceMgr, S.getLangOpts()); 12518 if (MacroName == "NULL") 12519 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12520 } 12521 12522 // Only warn if the null and context location are in the same macro expansion. 12523 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12524 return; 12525 12526 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12527 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12528 << FixItHint::CreateReplacement(Loc, 12529 S.getFixItZeroLiteralForType(T, Loc)); 12530 } 12531 12532 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12533 ObjCArrayLiteral *ArrayLiteral); 12534 12535 static void 12536 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12537 ObjCDictionaryLiteral *DictionaryLiteral); 12538 12539 /// Check a single element within a collection literal against the 12540 /// target element type. 12541 static void checkObjCCollectionLiteralElement(Sema &S, 12542 QualType TargetElementType, 12543 Expr *Element, 12544 unsigned ElementKind) { 12545 // Skip a bitcast to 'id' or qualified 'id'. 12546 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12547 if (ICE->getCastKind() == CK_BitCast && 12548 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12549 Element = ICE->getSubExpr(); 12550 } 12551 12552 QualType ElementType = Element->getType(); 12553 ExprResult ElementResult(Element); 12554 if (ElementType->getAs<ObjCObjectPointerType>() && 12555 S.CheckSingleAssignmentConstraints(TargetElementType, 12556 ElementResult, 12557 false, false) 12558 != Sema::Compatible) { 12559 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12560 << ElementType << ElementKind << TargetElementType 12561 << Element->getSourceRange(); 12562 } 12563 12564 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12565 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12566 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12567 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12568 } 12569 12570 /// Check an Objective-C array literal being converted to the given 12571 /// target type. 12572 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12573 ObjCArrayLiteral *ArrayLiteral) { 12574 if (!S.NSArrayDecl) 12575 return; 12576 12577 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12578 if (!TargetObjCPtr) 12579 return; 12580 12581 if (TargetObjCPtr->isUnspecialized() || 12582 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12583 != S.NSArrayDecl->getCanonicalDecl()) 12584 return; 12585 12586 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12587 if (TypeArgs.size() != 1) 12588 return; 12589 12590 QualType TargetElementType = TypeArgs[0]; 12591 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12592 checkObjCCollectionLiteralElement(S, TargetElementType, 12593 ArrayLiteral->getElement(I), 12594 0); 12595 } 12596 } 12597 12598 /// Check an Objective-C dictionary literal being converted to the given 12599 /// target type. 
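/// For example (Objective-C, illustrative only):
///   NSDictionary<NSString *, NSNumber *> *d = @{ @"one" : @"1" };
/// checks @"one" against NSString * and flags @"1" against NSNumber *.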
12600 static void
12601 checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
12602 ObjCDictionaryLiteral *DictionaryLiteral) {
12603 if (!S.NSDictionaryDecl)
12604 return;
12605
12606 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
12607 if (!TargetObjCPtr)
12608 return;
12609
12610 if (TargetObjCPtr->isUnspecialized() ||
12611 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
12612 != S.NSDictionaryDecl->getCanonicalDecl())
12613 return;
12614
12615 auto TypeArgs = TargetObjCPtr->getTypeArgs();
12616 if (TypeArgs.size() != 2)
12617 return;
12618
12619 QualType TargetKeyType = TypeArgs[0];
12620 QualType TargetObjectType = TypeArgs[1];
12621 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
12622 auto Element = DictionaryLiteral->getKeyValueElement(I);
12623 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1);
12624 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2);
12625 }
12626 }
12627
12628 // Helper function to filter out cases for constant width constant conversion.
12629 // Don't warn on char array initialization or for non-decimal values.
12630 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
12631 SourceLocation CC) {
12632 // If initializing from a constant, and the constant starts with '0',
12633 // then it is a binary, octal, or hexadecimal literal. Allow these constants
12634 // to fill all the bits, even if there is a sign change.
12635 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
12636 const char FirstLiteralCharacter =
12637 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
12638 if (FirstLiteralCharacter == '0')
12639 return false;
12640 }
12641
12642 // If the CC location points to a '{', and the type is char, then
12643 // assume it is an array initialization.
12644 if (CC.isValid() && T->isCharType()) {
12645 const char FirstContextCharacter =
12646 S.getSourceManager().getCharacterData(CC)[0];
12647 if (FirstContextCharacter == '{')
12648 return false;
12649 }
12650
12651 return true;
12652 }
12653
12654 static const IntegerLiteral *getIntegerLiteral(Expr *E) {
12655 const auto *IL = dyn_cast<IntegerLiteral>(E);
12656 if (!IL) {
12657 if (auto *UO = dyn_cast<UnaryOperator>(E)) {
12658 if (UO->getOpcode() == UO_Minus)
12659 return dyn_cast<IntegerLiteral>(UO->getSubExpr());
12660 }
12661 }
12662
12663 return IL;
12664 }
12665
12666 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
12667 E = E->IgnoreParenImpCasts();
12668 SourceLocation ExprLoc = E->getExprLoc();
12669
12670 if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
12671 BinaryOperator::Opcode Opc = BO->getOpcode();
12672 Expr::EvalResult Result;
12673 // Do not diagnose unsigned shifts.
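    // Illustrative sketch of what the '<<' handling below flags (assuming a
    // signed 'int x' in a boolean context such as an 'if' condition):
    //   if (x << 4) {}   // '<<' in boolean context, probably meant '<'
    //   if (2 << 1) {}   // constant shift, condition is always true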
12674 if (Opc == BO_Shl) {
12675 const auto *LHS = getIntegerLiteral(BO->getLHS());
12676 const auto *RHS = getIntegerLiteral(BO->getRHS());
12677 if (LHS && LHS->getValue() == 0)
12678 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
12679 else if (!E->isValueDependent() && LHS && RHS &&
12680 RHS->getValue().isNonNegative() &&
12681 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
12682 S.Diag(ExprLoc, diag::warn_left_shift_always)
12683 << (Result.Val.getInt() != 0);
12684 else if (E->getType()->isSignedIntegerType())
12685 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
12686 }
12687 }
12688
12689 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
12690 const auto *LHS = getIntegerLiteral(CO->getTrueExpr());
12691 const auto *RHS = getIntegerLiteral(CO->getFalseExpr());
12692 if (!LHS || !RHS)
12693 return;
12694 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
12695 (RHS->getValue() == 0 || RHS->getValue() == 1))
12696 // Do not diagnose common idioms.
12697 return;
12698 if (LHS->getValue() != 0 && RHS->getValue() != 0)
12699 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
12700 }
12701 }
12702
12703 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
12704 SourceLocation CC,
12705 bool *ICContext = nullptr,
12706 bool IsListInit = false) {
12707 if (E->isTypeDependent() || E->isValueDependent()) return;
12708
12709 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
12710 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
12711 if (Source == Target) return;
12712 if (Target->isDependentType()) return;
12713
12714 // If the conversion context location is invalid, don't complain. We also
12715 // don't want to emit a warning if the issue occurs from the expansion of
12716 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
12717 // delay this check as long as possible. Once we detect we are in that
12718 // scenario, we just return.
12719 if (CC.isInvalid())
12720 return;
12721
12722 if (Source->isAtomicType())
12723 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
12724
12725 // Diagnose implicit casts to bool.
12726 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
12727 if (isa<StringLiteral>(E))
12728 // Warn on string literal to bool. Checks for string literals in logical
12729 // and expressions, for instance, assert(0 && "error here"), are
12730 // prevented by a check in AnalyzeImplicitConversions().
12731 return DiagnoseImpCast(S, E, T, CC,
12732 diag::warn_impcast_string_literal_to_bool);
12733 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) ||
12734 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) {
12735 // This covers the literal expressions that evaluate to Objective-C
12736 // objects.
12737 return DiagnoseImpCast(S, E, T, CC,
12738 diag::warn_impcast_objective_c_literal_to_bool);
12739 }
12740 if (Source->isPointerType() || Source->canDecayToPointerType()) {
12741 // Warn on pointer to bool conversion that is always true.
12742 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false,
12743 SourceRange(CC));
12744 }
12745 }
12746
12747 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
12748 // is a typedef for signed char (macOS), then that constant value has to be 1
12749 // or 0.
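  // For instance (illustrative, on targets where BOOL is a signed char):
  //   BOOL b = 2;   // a constant other than 0/1 stored into BOOL is diagnosed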
12750 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 12751 Expr::EvalResult Result; 12752 if (E->EvaluateAsInt(Result, S.getASTContext(), 12753 Expr::SE_AllowSideEffects)) { 12754 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 12755 adornObjCBoolConversionDiagWithTernaryFixit( 12756 S, E, 12757 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 12758 << toString(Result.Val.getInt(), 10)); 12759 } 12760 return; 12761 } 12762 } 12763 12764 // Check implicit casts from Objective-C collection literals to specialized 12765 // collection types, e.g., NSArray<NSString *> *. 12766 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 12767 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 12768 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 12769 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 12770 12771 // Strip vector types. 12772 if (isa<VectorType>(Source)) { 12773 if (Target->isVLSTBuiltinType() && 12774 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 12775 QualType(Source, 0)) || 12776 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 12777 QualType(Source, 0)))) 12778 return; 12779 12780 if (!isa<VectorType>(Target)) { 12781 if (S.SourceMgr.isInSystemMacro(CC)) 12782 return; 12783 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 12784 } 12785 12786 // If the vector cast is cast between two vectors of the same size, it is 12787 // a bitcast, not a conversion. 12788 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 12789 return; 12790 12791 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 12792 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 12793 } 12794 if (auto VecTy = dyn_cast<VectorType>(Target)) 12795 Target = VecTy->getElementType().getTypePtr(); 12796 12797 // Strip complex types. 12798 if (isa<ComplexType>(Source)) { 12799 if (!isa<ComplexType>(Target)) { 12800 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 12801 return; 12802 12803 return DiagnoseImpCast(S, E, T, CC, 12804 S.getLangOpts().CPlusPlus 12805 ? diag::err_impcast_complex_scalar 12806 : diag::warn_impcast_complex_scalar); 12807 } 12808 12809 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 12810 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 12811 } 12812 12813 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 12814 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 12815 12816 // If the source is floating point... 12817 if (SourceBT && SourceBT->isFloatingPoint()) { 12818 // ...and the target is floating point... 12819 if (TargetBT && TargetBT->isFloatingPoint()) { 12820 // ...then warn if we're dropping FP rank. 12821 12822 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12823 QualType(SourceBT, 0), QualType(TargetBT, 0)); 12824 if (Order > 0) { 12825 // Don't warn about float constants that are precisely 12826 // representable in the target type. 12827 Expr::EvalResult result; 12828 if (E->EvaluateAsRValue(result, S.Context)) { 12829 // Value might be a float, a float vector, or a float complex. 12830 if (IsSameFloatAfterCast(result.Val, 12831 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 12832 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 12833 return; 12834 } 12835 12836 if (S.SourceMgr.isInSystemMacro(CC)) 12837 return; 12838 12839 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 12840 } 12841 // ... 
or possibly if we're increasing rank, too
12842 else if (Order < 0) {
12843 if (S.SourceMgr.isInSystemMacro(CC))
12844 return;
12845
12846 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
12847 }
12848 return;
12849 }
12850
12851 // If the target is integral, always warn.
12852 if (TargetBT && TargetBT->isInteger()) {
12853 if (S.SourceMgr.isInSystemMacro(CC))
12854 return;
12855
12856 DiagnoseFloatingImpCast(S, E, T, CC);
12857 }
12858
12859 // Detect the case where a call result is converted from floating-point to
12860 // bool, and the final argument to the call is converted from bool, to
12861 // discover this typo:
12862 //
12863 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
12864 //
12865 // FIXME: This is an incredibly special case; is there some more general
12866 // way to detect this class of misplaced-parentheses bug?
12867 if (Target->isBooleanType() && isa<CallExpr>(E)) {
12868 // Check last argument of function call to see if it is an
12869 // implicit cast from a type matching the type the result
12870 // is being cast to.
12871 CallExpr *CEx = cast<CallExpr>(E);
12872 if (unsigned NumArgs = CEx->getNumArgs()) {
12873 Expr *LastA = CEx->getArg(NumArgs - 1);
12874 Expr *InnerE = LastA->IgnoreParenImpCasts();
12875 if (isa<ImplicitCastExpr>(LastA) &&
12876 InnerE->getType()->isBooleanType()) {
12877 // Warn on this floating-point to bool conversion
12878 DiagnoseImpCast(S, E, T, CC,
12879 diag::warn_impcast_floating_point_to_bool);
12880 }
12881 }
12882 }
12883 return;
12884 }
12885
12886 // Valid casts involving fixed point types should be accounted for here.
12887 if (Source->isFixedPointType()) {
12888 if (Target->isUnsaturatedFixedPointType()) {
12889 Expr::EvalResult Result;
12890 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
12891 S.isConstantEvaluated())) {
12892 llvm::APFixedPoint Value = Result.Val.getFixedPoint();
12893 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
12894 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
12895 if (Value > MaxVal || Value < MinVal) {
12896 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12897 S.PDiag(diag::warn_impcast_fixed_point_range)
12898 << Value.toString() << T
12899 << E->getSourceRange()
12900 << clang::SourceRange(CC));
12901 return;
12902 }
12903 }
12904 } else if (Target->isIntegerType()) {
12905 Expr::EvalResult Result;
12906 if (!S.isConstantEvaluated() &&
12907 E->EvaluateAsFixedPoint(Result, S.Context,
12908 Expr::SE_AllowSideEffects)) {
12909 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
12910
12911 bool Overflowed;
12912 llvm::APSInt IntResult = FXResult.convertToInt(
12913 S.Context.getIntWidth(T),
12914 Target->isSignedIntegerOrEnumerationType(), &Overflowed);
12915
12916 if (Overflowed) {
12917 S.DiagRuntimeBehavior(E->getExprLoc(), E,
12918 S.PDiag(diag::warn_impcast_fixed_point_range)
12919 << FXResult.toString() << T
12920 << E->getSourceRange()
12921 << clang::SourceRange(CC));
12922 return;
12923 }
12924 }
12925 }
12926 } else if (Target->isUnsaturatedFixedPointType()) {
12927 if (Source->isIntegerType()) {
12928 Expr::EvalResult Result;
12929 if (!S.isConstantEvaluated() &&
12930 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
12931 llvm::APSInt Value = Result.Val.getInt();
12932
12933 bool Overflowed;
12934 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
12935 Value, S.Context.getFixedPointSemantics(T), &Overflowed);
12936
12937 if (Overflowed) {
12938
S.DiagRuntimeBehavior(E->getExprLoc(), E, 12939 S.PDiag(diag::warn_impcast_fixed_point_range) 12940 << toString(Value, /*Radix=*/10) << T 12941 << E->getSourceRange() 12942 << clang::SourceRange(CC)); 12943 return; 12944 } 12945 } 12946 } 12947 } 12948 12949 // If we are casting an integer type to a floating point type without 12950 // initialization-list syntax, we might lose accuracy if the floating 12951 // point type has a narrower significand than the integer type. 12952 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 12953 TargetBT->isFloatingType() && !IsListInit) { 12954 // Determine the number of precision bits in the source integer type. 12955 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 12956 /*Approximate*/ true); 12957 unsigned int SourcePrecision = SourceRange.Width; 12958 12959 // Determine the number of precision bits in the 12960 // target floating point type. 12961 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 12962 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12963 12964 if (SourcePrecision > 0 && TargetPrecision > 0 && 12965 SourcePrecision > TargetPrecision) { 12966 12967 if (Optional<llvm::APSInt> SourceInt = 12968 E->getIntegerConstantExpr(S.Context)) { 12969 // If the source integer is a constant, convert it to the target 12970 // floating point type. Issue a warning if the value changes 12971 // during the whole conversion. 12972 llvm::APFloat TargetFloatValue( 12973 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12974 llvm::APFloat::opStatus ConversionStatus = 12975 TargetFloatValue.convertFromAPInt( 12976 *SourceInt, SourceBT->isSignedInteger(), 12977 llvm::APFloat::rmNearestTiesToEven); 12978 12979 if (ConversionStatus != llvm::APFloat::opOK) { 12980 SmallString<32> PrettySourceValue; 12981 SourceInt->toString(PrettySourceValue, 10); 12982 SmallString<32> PrettyTargetValue; 12983 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 12984 12985 S.DiagRuntimeBehavior( 12986 E->getExprLoc(), E, 12987 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 12988 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12989 << E->getSourceRange() << clang::SourceRange(CC)); 12990 } 12991 } else { 12992 // Otherwise, the implicit conversion may lose precision. 12993 DiagnoseImpCast(S, E, T, CC, 12994 diag::warn_impcast_integer_float_precision); 12995 } 12996 } 12997 } 12998 12999 DiagnoseNullConversion(S, E, T, CC); 13000 13001 S.DiscardMisalignedMemberAddress(Target, E); 13002 13003 if (Target->isBooleanType()) 13004 DiagnoseIntInBoolContext(S, E); 13005 13006 if (!Source->isIntegerType() || !Target->isIntegerType()) 13007 return; 13008 13009 // TODO: remove this early return once the false positives for constant->bool 13010 // in templates, macros, etc, are reduced or removed. 
13011 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 13012 return; 13013 13014 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 13015 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 13016 return adornObjCBoolConversionDiagWithTernaryFixit( 13017 S, E, 13018 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 13019 << E->getType()); 13020 } 13021 13022 IntRange SourceTypeRange = 13023 IntRange::forTargetOfCanonicalType(S.Context, Source); 13024 IntRange LikelySourceRange = 13025 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 13026 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 13027 13028 if (LikelySourceRange.Width > TargetRange.Width) { 13029 // If the source is a constant, use a default-on diagnostic. 13030 // TODO: this should happen for bitfield stores, too. 13031 Expr::EvalResult Result; 13032 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 13033 S.isConstantEvaluated())) { 13034 llvm::APSInt Value(32); 13035 Value = Result.Val.getInt(); 13036 13037 if (S.SourceMgr.isInSystemMacro(CC)) 13038 return; 13039 13040 std::string PrettySourceValue = toString(Value, 10); 13041 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13042 13043 S.DiagRuntimeBehavior( 13044 E->getExprLoc(), E, 13045 S.PDiag(diag::warn_impcast_integer_precision_constant) 13046 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13047 << E->getSourceRange() << SourceRange(CC)); 13048 return; 13049 } 13050 13051 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 13052 if (S.SourceMgr.isInSystemMacro(CC)) 13053 return; 13054 13055 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 13056 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 13057 /* pruneControlFlow */ true); 13058 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 13059 } 13060 13061 if (TargetRange.Width > SourceTypeRange.Width) { 13062 if (auto *UO = dyn_cast<UnaryOperator>(E)) 13063 if (UO->getOpcode() == UO_Minus) 13064 if (Source->isUnsignedIntegerType()) { 13065 if (Target->isUnsignedIntegerType()) 13066 return DiagnoseImpCast(S, E, T, CC, 13067 diag::warn_impcast_high_order_zero_bits); 13068 if (Target->isSignedIntegerType()) 13069 return DiagnoseImpCast(S, E, T, CC, 13070 diag::warn_impcast_nonnegative_result); 13071 } 13072 } 13073 13074 if (TargetRange.Width == LikelySourceRange.Width && 13075 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 13076 Source->isSignedIntegerType()) { 13077 // Warn when doing a signed to signed conversion, warn if the positive 13078 // source value is exactly the width of the target type, which will 13079 // cause a negative value to be stored. 13080 13081 Expr::EvalResult Result; 13082 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 13083 !S.SourceMgr.isInSystemMacro(CC)) { 13084 llvm::APSInt Value = Result.Val.getInt(); 13085 if (isSameWidthConstantConversion(S, E, T, CC)) { 13086 std::string PrettySourceValue = toString(Value, 10); 13087 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 13088 13089 S.DiagRuntimeBehavior( 13090 E->getExprLoc(), E, 13091 S.PDiag(diag::warn_impcast_integer_precision_constant) 13092 << PrettySourceValue << PrettyTargetValue << E->getType() << T 13093 << E->getSourceRange() << SourceRange(CC)); 13094 return; 13095 } 13096 } 13097 13098 // Fall through for non-constants to give a sign conversion warning. 
13099 } 13100 13101 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 13102 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 13103 LikelySourceRange.Width == TargetRange.Width)) { 13104 if (S.SourceMgr.isInSystemMacro(CC)) 13105 return; 13106 13107 unsigned DiagID = diag::warn_impcast_integer_sign; 13108 13109 // Traditionally, gcc has warned about this under -Wsign-compare. 13110 // We also want to warn about it in -Wconversion. 13111 // So if -Wconversion is off, use a completely identical diagnostic 13112 // in the sign-compare group. 13113 // The conditional-checking code will 13114 if (ICContext) { 13115 DiagID = diag::warn_impcast_integer_sign_conditional; 13116 *ICContext = true; 13117 } 13118 13119 return DiagnoseImpCast(S, E, T, CC, DiagID); 13120 } 13121 13122 // Diagnose conversions between different enumeration types. 13123 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 13124 // type, to give us better diagnostics. 13125 QualType SourceType = E->getType(); 13126 if (!S.getLangOpts().CPlusPlus) { 13127 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13128 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 13129 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 13130 SourceType = S.Context.getTypeDeclType(Enum); 13131 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 13132 } 13133 } 13134 13135 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 13136 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 13137 if (SourceEnum->getDecl()->hasNameForLinkage() && 13138 TargetEnum->getDecl()->hasNameForLinkage() && 13139 SourceEnum != TargetEnum) { 13140 if (S.SourceMgr.isInSystemMacro(CC)) 13141 return; 13142 13143 return DiagnoseImpCast(S, E, SourceType, T, CC, 13144 diag::warn_impcast_different_enum_types); 13145 } 13146 } 13147 13148 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13149 SourceLocation CC, QualType T); 13150 13151 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13152 SourceLocation CC, bool &ICContext) { 13153 E = E->IgnoreParenImpCasts(); 13154 13155 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13156 return CheckConditionalOperator(S, CO, CC, T); 13157 13158 AnalyzeImplicitConversions(S, E, CC); 13159 if (E->getType() != T) 13160 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13161 } 13162 13163 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13164 SourceLocation CC, QualType T) { 13165 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13166 13167 Expr *TrueExpr = E->getTrueExpr(); 13168 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13169 TrueExpr = BCO->getCommon(); 13170 13171 bool Suspicious = false; 13172 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13173 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13174 13175 if (T->isBooleanType()) 13176 DiagnoseIntInBoolContext(S, E); 13177 13178 // If -Wconversion would have warned about either of the candidates 13179 // for a signedness conversion to the context type... 13180 if (!Suspicious) return; 13181 13182 // ...but it's currently ignored... 13183 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13184 return; 13185 13186 // ...then check whether it would have warned about either of the 13187 // candidates for a signedness conversion to the condition type. 
13188 if (E->getType() == T) return; 13189 13190 Suspicious = false; 13191 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13192 E->getType(), CC, &Suspicious); 13193 if (!Suspicious) 13194 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13195 E->getType(), CC, &Suspicious); 13196 } 13197 13198 /// Check conversion of given expression to boolean. 13199 /// Input argument E is a logical expression. 13200 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13201 if (S.getLangOpts().Bool) 13202 return; 13203 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13204 return; 13205 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13206 } 13207 13208 namespace { 13209 struct AnalyzeImplicitConversionsWorkItem { 13210 Expr *E; 13211 SourceLocation CC; 13212 bool IsListInit; 13213 }; 13214 } 13215 13216 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13217 /// that should be visited are added to WorkList. 13218 static void AnalyzeImplicitConversions( 13219 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13220 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13221 Expr *OrigE = Item.E; 13222 SourceLocation CC = Item.CC; 13223 13224 QualType T = OrigE->getType(); 13225 Expr *E = OrigE->IgnoreParenImpCasts(); 13226 13227 // Propagate whether we are in a C++ list initialization expression. 13228 // If so, we do not issue warnings for implicit int-float conversion 13229 // precision loss, because C++11 narrowing already handles it. 13230 bool IsListInit = Item.IsListInit || 13231 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13232 13233 if (E->isTypeDependent() || E->isValueDependent()) 13234 return; 13235 13236 Expr *SourceExpr = E; 13237 // Examine, but don't traverse into the source expression of an 13238 // OpaqueValueExpr, since it may have multiple parents and we don't want to 13239 // emit duplicate diagnostics. Its fine to examine the form or attempt to 13240 // evaluate it in the context of checking the specific conversion to T though. 13241 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13242 if (auto *Src = OVE->getSourceExpr()) 13243 SourceExpr = Src; 13244 13245 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 13246 if (UO->getOpcode() == UO_Not && 13247 UO->getSubExpr()->isKnownToHaveBooleanValue()) 13248 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 13249 << OrigE->getSourceRange() << T->isBooleanType() 13250 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 13251 13252 // For conditional operators, we analyze the arguments as if they 13253 // were being fed directly into the output. 13254 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 13255 CheckConditionalOperator(S, CO, CC, T); 13256 return; 13257 } 13258 13259 // Check implicit argument conversions for function calls. 13260 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 13261 CheckImplicitArgumentConversions(S, Call, CC); 13262 13263 // Go ahead and check any implicit conversions we might have skipped. 13264 // The non-canonical typecheck is just an optimization; 13265 // CheckImplicitConversion will filter out dead implicit conversions. 13266 if (SourceExpr->getType() != T) 13267 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 13268 13269 // Now continue drilling into this expression. 
13270 13271 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 13272 // The bound subexpressions in a PseudoObjectExpr are not reachable 13273 // as transitive children. 13274 // FIXME: Use a more uniform representation for this. 13275 for (auto *SE : POE->semantics()) 13276 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 13277 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 13278 } 13279 13280 // Skip past explicit casts. 13281 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 13282 E = CE->getSubExpr()->IgnoreParenImpCasts(); 13283 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 13284 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13285 WorkList.push_back({E, CC, IsListInit}); 13286 return; 13287 } 13288 13289 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13290 // Do a somewhat different check with comparison operators. 13291 if (BO->isComparisonOp()) 13292 return AnalyzeComparison(S, BO); 13293 13294 // And with simple assignments. 13295 if (BO->getOpcode() == BO_Assign) 13296 return AnalyzeAssignment(S, BO); 13297 // And with compound assignments. 13298 if (BO->isAssignmentOp()) 13299 return AnalyzeCompoundAssignment(S, BO); 13300 } 13301 13302 // These break the otherwise-useful invariant below. Fortunately, 13303 // we don't really need to recurse into them, because any internal 13304 // expressions should have been analyzed already when they were 13305 // built into statements. 13306 if (isa<StmtExpr>(E)) return; 13307 13308 // Don't descend into unevaluated contexts. 13309 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 13310 13311 // Now just recurse over the expression's children. 13312 CC = E->getExprLoc(); 13313 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 13314 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 13315 for (Stmt *SubStmt : E->children()) { 13316 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 13317 if (!ChildExpr) 13318 continue; 13319 13320 if (IsLogicalAndOperator && 13321 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 13322 // Ignore checking string literals that are in logical and operators. 13323 // This is a common pattern for asserts. 13324 continue; 13325 WorkList.push_back({ChildExpr, CC, IsListInit}); 13326 } 13327 13328 if (BO && BO->isLogicalOp()) { 13329 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 13330 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13331 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13332 13333 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 13334 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13335 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13336 } 13337 13338 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 13339 if (U->getOpcode() == UO_LNot) { 13340 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 13341 } else if (U->getOpcode() != UO_AddrOf) { 13342 if (U->getSubExpr()->getType()->isAtomicType()) 13343 S.Diag(U->getSubExpr()->getBeginLoc(), 13344 diag::warn_atomic_implicit_seq_cst); 13345 } 13346 } 13347 } 13348 13349 /// AnalyzeImplicitConversions - Find and report any interesting 13350 /// implicit conversions in the given expression. There are a couple 13351 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
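/// A hedged sketch of code this analysis typically flags (hypothetical,
/// assuming a target where 'long' is wider than 'int'):
///   void g(long l, int i) {
///     int n = l;    // -Wconversion: possible loss of integer precision
///     char c = i;   // -Wconversion: 'int' to 'char' may lose precision
///   }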
13352 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
13353 bool IsListInit/*= false*/) {
13354 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
13355 WorkList.push_back({OrigE, CC, IsListInit});
13356 while (!WorkList.empty())
13357 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
13358 }
13359
13360 /// Diagnose integer type and any valid implicit conversion to it.
13361 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
13362 // Taking into account implicit conversions,
13363 // allow any integer.
13364 if (!E->getType()->isIntegerType()) {
13365 S.Diag(E->getBeginLoc(),
13366 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
13367 return true;
13368 }
13369 // Potentially emit standard warnings for implicit conversions if enabled
13370 // using -Wconversion.
13371 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
13372 return false;
13373 }
13374
13375 // Helper function for Sema::DiagnoseAlwaysNonNullPointer.
13376 // Returns true when emitting a warning about taking the address of a reference.
13377 static bool CheckForReference(Sema &SemaRef, const Expr *E,
13378 const PartialDiagnostic &PD) {
13379 E = E->IgnoreParenImpCasts();
13380
13381 const FunctionDecl *FD = nullptr;
13382
13383 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
13384 if (!DRE->getDecl()->getType()->isReferenceType())
13385 return false;
13386 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
13387 if (!M->getMemberDecl()->getType()->isReferenceType())
13388 return false;
13389 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
13390 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
13391 return false;
13392 FD = Call->getDirectCallee();
13393 } else {
13394 return false;
13395 }
13396
13397 SemaRef.Diag(E->getExprLoc(), PD);
13398
13399 // If possible, point to location of function.
13400 if (FD) {
13401 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
13402 }
13403
13404 return true;
13405 }
13406
13407 // Returns true if the SourceLocation is expanded from any macro body.
13408 // Returns false if the SourceLocation is invalid, is not in a macro
13409 // expansion, or is expanded from a top-level macro argument.
13410 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
13411 if (Loc.isInvalid())
13412 return false;
13413
13414 while (Loc.isMacroID()) {
13415 if (SM.isMacroBodyExpansion(Loc))
13416 return true;
13417 Loc = SM.getImmediateMacroCallerLoc(Loc);
13418 }
13419
13420 return false;
13421 }
13422
13423 /// Diagnose pointers that are always non-null.
13424 /// \param E the expression containing the pointer
13425 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
13426 /// compared to a null pointer
13427 /// \param IsEqual True when the comparison is equal to a null pointer
13428 /// \param Range Extra SourceRange to highlight in the diagnostic
13429 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
13430 Expr::NullPointerConstantKind NullKind,
13431 bool IsEqual, SourceRange Range) {
13432 if (!E)
13433 return;
13434
13435 // Don't warn inside macros.
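// (Illustrative: given "#define PTR (&SomeGlobal)", a later "if (PTR)" is not
// diagnosed because "&SomeGlobal" is spelled inside the macro body, whereas a
// plain "if (&SomeGlobal)" written directly still is.)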
13436 if (E->getExprLoc().isMacroID()) { 13437 const SourceManager &SM = getSourceManager(); 13438 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 13439 IsInAnyMacroBody(SM, Range.getBegin())) 13440 return; 13441 } 13442 E = E->IgnoreImpCasts(); 13443 13444 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 13445 13446 if (isa<CXXThisExpr>(E)) { 13447 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 13448 : diag::warn_this_bool_conversion; 13449 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 13450 return; 13451 } 13452 13453 bool IsAddressOf = false; 13454 13455 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13456 if (UO->getOpcode() != UO_AddrOf) 13457 return; 13458 IsAddressOf = true; 13459 E = UO->getSubExpr(); 13460 } 13461 13462 if (IsAddressOf) { 13463 unsigned DiagID = IsCompare 13464 ? diag::warn_address_of_reference_null_compare 13465 : diag::warn_address_of_reference_bool_conversion; 13466 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13467 << IsEqual; 13468 if (CheckForReference(*this, E, PD)) { 13469 return; 13470 } 13471 } 13472 13473 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13474 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13475 std::string Str; 13476 llvm::raw_string_ostream S(Str); 13477 E->printPretty(S, nullptr, getPrintingPolicy()); 13478 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 13479 : diag::warn_cast_nonnull_to_bool; 13480 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13481 << E->getSourceRange() << Range << IsEqual; 13482 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13483 }; 13484 13485 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13486 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13487 if (auto *Callee = Call->getDirectCallee()) { 13488 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13489 ComplainAboutNonnullParamOrCall(A); 13490 return; 13491 } 13492 } 13493 } 13494 13495 // Expect to find a single Decl. Skip anything more complicated. 13496 ValueDecl *D = nullptr; 13497 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13498 D = R->getDecl(); 13499 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13500 D = M->getMemberDecl(); 13501 } 13502 13503 // Weak Decls can be null. 13504 if (!D || D->isWeak()) 13505 return; 13506 13507 // Check for parameter decl with nonnull attribute 13508 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 13509 if (getCurFunction() && 13510 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 13511 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 13512 ComplainAboutNonnullParamOrCall(A); 13513 return; 13514 } 13515 13516 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 13517 // Skip function template not specialized yet. 
13518 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
13519 return;
13520 auto ParamIter = llvm::find(FD->parameters(), PV);
13521 assert(ParamIter != FD->param_end());
13522 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
13523
13524 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
13525 if (!NonNull->args_size()) {
13526 ComplainAboutNonnullParamOrCall(NonNull);
13527 return;
13528 }
13529
13530 for (const ParamIdx &ArgNo : NonNull->args()) {
13531 if (ArgNo.getASTIndex() == ParamNo) {
13532 ComplainAboutNonnullParamOrCall(NonNull);
13533 return;
13534 }
13535 }
13536 }
13537 }
13538 }
13539 }
13540
13541 QualType T = D->getType();
13542 const bool IsArray = T->isArrayType();
13543 const bool IsFunction = T->isFunctionType();
13544
13545 // Address of function is used to silence the function warning.
13546 if (IsAddressOf && IsFunction) {
13547 return;
13548 }
13549
13550 // Found nothing.
13551 if (!IsAddressOf && !IsFunction && !IsArray)
13552 return;
13553
13554 // Pretty print the expression for the diagnostic.
13555 std::string Str;
13556 llvm::raw_string_ostream S(Str);
13557 E->printPretty(S, nullptr, getPrintingPolicy());
13558
13559 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
13560 : diag::warn_impcast_pointer_to_bool;
13561 enum {
13562 AddressOf,
13563 FunctionPointer,
13564 ArrayPointer
13565 } DiagType;
13566 if (IsAddressOf)
13567 DiagType = AddressOf;
13568 else if (IsFunction)
13569 DiagType = FunctionPointer;
13570 else if (IsArray)
13571 DiagType = ArrayPointer;
13572 else
13573 llvm_unreachable("Could not determine diagnostic.");
13574 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
13575 << Range << IsEqual;
13576
13577 if (!IsFunction)
13578 return;
13579
13580 // Suggest '&' to silence the function warning.
13581 Diag(E->getExprLoc(), diag::note_function_warning_silence)
13582 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
13583
13584 // Check to see if '()' fixit should be emitted.
13585 QualType ReturnType;
13586 UnresolvedSet<4> NonTemplateOverloads;
13587 tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
13588 if (ReturnType.isNull())
13589 return;
13590
13591 if (IsCompare) {
13592 // There are two cases here. If there is a null constant, then only suggest
13593 // for a pointer return type. If the null is 0, then suggest if the return
13594 // type is a pointer or an integer type.
13595 if (!ReturnType->isPointerType()) {
13596 if (NullKind == Expr::NPCK_ZeroExpression ||
13597 NullKind == Expr::NPCK_ZeroLiteral) {
13598 if (!ReturnType->isIntegerType())
13599 return;
13600 } else {
13601 return;
13602 }
13603 }
13604 } else { // !IsCompare
13605 // For function to bool, only suggest if the function pointer has bool
13606 // return type.
13607 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
13608 return;
13609 }
13610 Diag(E->getExprLoc(), diag::note_function_to_function_call)
13611 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
13612 }
13613
13614 /// Diagnoses "dangerous" implicit conversions within the given
13615 /// expression (which is a full expression). Implements -Wconversion
13616 /// and -Wsign-compare.
13617 ///
13618 /// \param CC the "context" location of the implicit conversion, i.e.
13619 /// the location of the syntactic entity requiring the implicit
13620 /// conversion
13621 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
13622 // Don't diagnose in unevaluated contexts.
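// (Illustrative: the operands of sizeof, decltype, and noexcept are never
// evaluated, so implicit conversions written inside them never actually
// happen and are not worth diagnosing.)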
13623 if (isUnevaluatedContext())
13624 return;
13625
13626 // Don't diagnose for value- or type-dependent expressions.
13627 if (E->isTypeDependent() || E->isValueDependent())
13628 return;
13629
13630 // Check for array bounds violations in cases where the check isn't triggered
13631 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
13632 // ArraySubscriptExpr is on the RHS of a variable initialization.
13633 CheckArrayAccess(E);
13634
13635 // This is not the right CC for (e.g.) a variable initialization.
13636 AnalyzeImplicitConversions(*this, E, CC);
13637 }
13638
13639 /// CheckBoolLikeConversion - Check conversion of given expression to boolean.
13640 /// Input argument E is a logical expression.
13641 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
13642 ::CheckBoolLikeConversion(*this, E, CC);
13643 }
13644
13645 /// Diagnose when an expression is an integer constant expression and its evaluation
13646 /// results in integer overflow.
13647 void Sema::CheckForIntOverflow (Expr *E) {
13648 // Use a work list to deal with nested struct initializers.
13649 SmallVector<Expr *, 2> Exprs(1, E);
13650
13651 do {
13652 Expr *OriginalE = Exprs.pop_back_val();
13653 Expr *E = OriginalE->IgnoreParenCasts();
13654
13655 if (isa<BinaryOperator>(E)) {
13656 E->EvaluateForOverflow(Context);
13657 continue;
13658 }
13659
13660 if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
13661 Exprs.append(InitList->inits().begin(), InitList->inits().end());
13662 else if (isa<ObjCBoxedExpr>(OriginalE))
13663 E->EvaluateForOverflow(Context);
13664 else if (auto Call = dyn_cast<CallExpr>(E))
13665 Exprs.append(Call->arg_begin(), Call->arg_end());
13666 else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
13667 Exprs.append(Message->arg_begin(), Message->arg_end());
13668 } while (!Exprs.empty());
13669 }
13670
13671 namespace {
13672
13673 /// Visitor for expressions which looks for unsequenced operations on the
13674 /// same object.
13675 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
13676 using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
13677
13678 /// A tree of sequenced regions within an expression. Two regions are
13679 /// unsequenced if one is an ancestor or a descendant of the other. When we
13680 /// finish processing an expression with sequencing, such as a comma
13681 /// expression, we fold its tree nodes into its parent, since they are
13682 /// unsequenced with respect to nodes we will visit later.
13683 class SequenceTree {
13684 struct Value {
13685 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
13686 unsigned Parent : 31;
13687 unsigned Merged : 1;
13688 };
13689 SmallVector<Value, 8> Values;
13690
13691 public:
13692 /// A region within an expression which may be sequenced with respect
13693 /// to some other region.
13694 class Seq {
13695 friend class SequenceTree;
13696
13697 unsigned Index;
13698
13699 explicit Seq(unsigned N) : Index(N) {}
13700
13701 public:
13702 Seq() : Index(0) {}
13703 };
13704
13705 SequenceTree() { Values.push_back(Value(0)); }
13706 Seq root() const { return Seq(0); }
13707
13708 /// Create a new sequence of operations, which is an unsequenced
13709 /// subset of \p Parent. This sequence of operations is sequenced with
13710 /// respect to other children of \p Parent.
13711 Seq allocate(Seq Parent) {
13712 Values.push_back(Value(Parent.Index));
13713 return Seq(Values.size() - 1);
13714 }
13715
13716 /// Merge a sequence of operations into its parent.
13717 void merge(Seq S) { 13718 Values[S.Index].Merged = true; 13719 } 13720 13721 /// Determine whether two operations are unsequenced. This operation 13722 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 13723 /// should have been merged into its parent as appropriate. 13724 bool isUnsequenced(Seq Cur, Seq Old) { 13725 unsigned C = representative(Cur.Index); 13726 unsigned Target = representative(Old.Index); 13727 while (C >= Target) { 13728 if (C == Target) 13729 return true; 13730 C = Values[C].Parent; 13731 } 13732 return false; 13733 } 13734 13735 private: 13736 /// Pick a representative for a sequence. 13737 unsigned representative(unsigned K) { 13738 if (Values[K].Merged) 13739 // Perform path compression as we go. 13740 return Values[K].Parent = representative(Values[K].Parent); 13741 return K; 13742 } 13743 }; 13744 13745 /// An object for which we can track unsequenced uses. 13746 using Object = const NamedDecl *; 13747 13748 /// Different flavors of object usage which we track. We only track the 13749 /// least-sequenced usage of each kind. 13750 enum UsageKind { 13751 /// A read of an object. Multiple unsequenced reads are OK. 13752 UK_Use, 13753 13754 /// A modification of an object which is sequenced before the value 13755 /// computation of the expression, such as ++n in C++. 13756 UK_ModAsValue, 13757 13758 /// A modification of an object which is not sequenced before the value 13759 /// computation of the expression, such as n++. 13760 UK_ModAsSideEffect, 13761 13762 UK_Count = UK_ModAsSideEffect + 1 13763 }; 13764 13765 /// Bundle together a sequencing region and the expression corresponding 13766 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 13767 struct Usage { 13768 const Expr *UsageExpr; 13769 SequenceTree::Seq Seq; 13770 13771 Usage() : UsageExpr(nullptr), Seq() {} 13772 }; 13773 13774 struct UsageInfo { 13775 Usage Uses[UK_Count]; 13776 13777 /// Have we issued a diagnostic for this object already? 13778 bool Diagnosed; 13779 13780 UsageInfo() : Uses(), Diagnosed(false) {} 13781 }; 13782 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 13783 13784 Sema &SemaRef; 13785 13786 /// Sequenced regions within the expression. 13787 SequenceTree Tree; 13788 13789 /// Declaration modifications and references which we have seen. 13790 UsageInfoMap UsageMap; 13791 13792 /// The region we are currently within. 13793 SequenceTree::Seq Region; 13794 13795 /// Filled in with declarations which were modified as a side-effect 13796 /// (that is, post-increment operations). 13797 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 13798 13799 /// Expressions to check later. We defer checking these to reduce 13800 /// stack usage. 13801 SmallVectorImpl<const Expr *> &WorkList; 13802 13803 /// RAII object wrapping the visitation of a sequenced subexpression of an 13804 /// expression. At the end of this process, the side-effects of the evaluation 13805 /// become sequenced with respect to the value computation of the result, so 13806 /// we downgrade any UK_ModAsSideEffect within the evaluation to 13807 /// UK_ModAsValue. 
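/// For illustration: while visiting the LHS of a comma operator, as in
/// "(n++, n)", the "n++" is first recorded as UK_ModAsSideEffect; once the
/// LHS has been fully visited, its side effect is sequenced before everything
/// that follows, so the recorded usage is downgraded to UK_ModAsValue.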
13808 struct SequencedSubexpression { 13809 SequencedSubexpression(SequenceChecker &Self) 13810 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 13811 Self.ModAsSideEffect = &ModAsSideEffect; 13812 } 13813 13814 ~SequencedSubexpression() { 13815 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 13816 // Add a new usage with usage kind UK_ModAsValue, and then restore 13817 // the previous usage with UK_ModAsSideEffect (thus clearing it if 13818 // the previous one was empty). 13819 UsageInfo &UI = Self.UsageMap[M.first]; 13820 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 13821 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 13822 SideEffectUsage = M.second; 13823 } 13824 Self.ModAsSideEffect = OldModAsSideEffect; 13825 } 13826 13827 SequenceChecker &Self; 13828 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 13829 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 13830 }; 13831 13832 /// RAII object wrapping the visitation of a subexpression which we might 13833 /// choose to evaluate as a constant. If any subexpression is evaluated and 13834 /// found to be non-constant, this allows us to suppress the evaluation of 13835 /// the outer expression. 13836 class EvaluationTracker { 13837 public: 13838 EvaluationTracker(SequenceChecker &Self) 13839 : Self(Self), Prev(Self.EvalTracker) { 13840 Self.EvalTracker = this; 13841 } 13842 13843 ~EvaluationTracker() { 13844 Self.EvalTracker = Prev; 13845 if (Prev) 13846 Prev->EvalOK &= EvalOK; 13847 } 13848 13849 bool evaluate(const Expr *E, bool &Result) { 13850 if (!EvalOK || E->isValueDependent()) 13851 return false; 13852 EvalOK = E->EvaluateAsBooleanCondition( 13853 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 13854 return EvalOK; 13855 } 13856 13857 private: 13858 SequenceChecker &Self; 13859 EvaluationTracker *Prev; 13860 bool EvalOK = true; 13861 } *EvalTracker = nullptr; 13862 13863 /// Find the object which is produced by the specified expression, 13864 /// if any. 13865 Object getObject(const Expr *E, bool Mod) const { 13866 E = E->IgnoreParenCasts(); 13867 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13868 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 13869 return getObject(UO->getSubExpr(), Mod); 13870 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13871 if (BO->getOpcode() == BO_Comma) 13872 return getObject(BO->getRHS(), Mod); 13873 if (Mod && BO->isAssignmentOp()) 13874 return getObject(BO->getLHS(), Mod); 13875 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 13876 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 13877 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 13878 return ME->getMemberDecl(); 13879 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13880 // FIXME: If this is a reference, map through to its value. 13881 return DRE->getDecl(); 13882 return nullptr; 13883 } 13884 13885 /// Note that an object \p O was modified or used by an expression 13886 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 13887 /// the object \p O as obtained via the \p UsageMap. 13888 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 13889 // Get the old usage for the given object and usage kind. 
13890 Usage &U = UI.Uses[UK];
13891 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
13892 // If we have a modification as side effect and are in a sequenced
13893 // subexpression, save the old Usage so that we can restore it later
13894 // in SequencedSubexpression::~SequencedSubexpression.
13895 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
13896 ModAsSideEffect->push_back(std::make_pair(O, U));
13897 // Then record the new usage with the current sequencing region.
13898 U.UsageExpr = UsageExpr;
13899 U.Seq = Region;
13900 }
13901 }
13902
13903 /// Check whether a modification or use of an object \p O in an expression
13904 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
13905 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
13906 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
13907 /// usage and false when we are checking for a mod-use unsequenced usage.
13908 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
13909 UsageKind OtherKind, bool IsModMod) {
13910 if (UI.Diagnosed)
13911 return;
13912
13913 const Usage &U = UI.Uses[OtherKind];
13914 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
13915 return;
13916
13917 const Expr *Mod = U.UsageExpr;
13918 const Expr *ModOrUse = UsageExpr;
13919 if (OtherKind == UK_Use)
13920 std::swap(Mod, ModOrUse);
13921
13922 SemaRef.DiagRuntimeBehavior(
13923 Mod->getExprLoc(), {Mod, ModOrUse},
13924 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
13925 : diag::warn_unsequenced_mod_use)
13926 << O << SourceRange(ModOrUse->getExprLoc()));
13927 UI.Diagnosed = true;
13928 }
13929
13930 // A note on note{Pre, Post}{Use, Mod}:
13931 //
13932 // (It helps to follow the algorithm with an expression such as
13933 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
13934 // operations before C++17 and both are well-defined in C++17).
13935 //
13936 // When visiting a node which uses/modifies an object we first call notePreUse
13937 // or notePreMod before visiting its sub-expression(s). At this point the
13938 // children of the current node have not yet been visited and so the eventual
13939 // uses/modifications resulting from the children of the current node have not
13940 // been recorded yet.
13941 //
13942 // We then visit the children of the current node. After that notePostUse or
13943 // notePostMod is called. These will 1) detect an unsequenced modification
13944 // as side effect (as in "k++ + k") and 2) add a new usage with the
13945 // appropriate usage kind.
13946 //
13947 // We also have to be careful that some operations sequence modifications as
13948 // side effects as well (for example: || or ,). To account for this we wrap
13949 // the visitation of such a sub-expression (for example: the LHS of || or ,)
13950 // with SequencedSubexpression. SequencedSubexpression is an RAII object
13951 // which records usages which are modifications as side effect, and then
13952 // downgrades them (or more accurately restores the previous usage which was a
13953 // modification as side effect) when exiting the scope of the sequenced
13954 // subexpression.
13955
13956 void notePreUse(Object O, const Expr *UseExpr) {
13957 UsageInfo &UI = UsageMap[O];
13958 // Uses conflict with other modifications.
13959 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 13960 } 13961 13962 void notePostUse(Object O, const Expr *UseExpr) { 13963 UsageInfo &UI = UsageMap[O]; 13964 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 13965 /*IsModMod=*/false); 13966 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 13967 } 13968 13969 void notePreMod(Object O, const Expr *ModExpr) { 13970 UsageInfo &UI = UsageMap[O]; 13971 // Modifications conflict with other modifications and with uses. 13972 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 13973 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 13974 } 13975 13976 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 13977 UsageInfo &UI = UsageMap[O]; 13978 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 13979 /*IsModMod=*/true); 13980 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 13981 } 13982 13983 public: 13984 SequenceChecker(Sema &S, const Expr *E, 13985 SmallVectorImpl<const Expr *> &WorkList) 13986 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 13987 Visit(E); 13988 // Silence a -Wunused-private-field since WorkList is now unused. 13989 // TODO: Evaluate if it can be used, and if not remove it. 13990 (void)this->WorkList; 13991 } 13992 13993 void VisitStmt(const Stmt *S) { 13994 // Skip all statements which aren't expressions for now. 13995 } 13996 13997 void VisitExpr(const Expr *E) { 13998 // By default, just recurse to evaluated subexpressions. 13999 Base::VisitStmt(E); 14000 } 14001 14002 void VisitCastExpr(const CastExpr *E) { 14003 Object O = Object(); 14004 if (E->getCastKind() == CK_LValueToRValue) 14005 O = getObject(E->getSubExpr(), false); 14006 14007 if (O) 14008 notePreUse(O, E); 14009 VisitExpr(E); 14010 if (O) 14011 notePostUse(O, E); 14012 } 14013 14014 void VisitSequencedExpressions(const Expr *SequencedBefore, 14015 const Expr *SequencedAfter) { 14016 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 14017 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 14018 SequenceTree::Seq OldRegion = Region; 14019 14020 { 14021 SequencedSubexpression SeqBefore(*this); 14022 Region = BeforeRegion; 14023 Visit(SequencedBefore); 14024 } 14025 14026 Region = AfterRegion; 14027 Visit(SequencedAfter); 14028 14029 Region = OldRegion; 14030 14031 Tree.merge(BeforeRegion); 14032 Tree.merge(AfterRegion); 14033 } 14034 14035 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 14036 // C++17 [expr.sub]p1: 14037 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 14038 // expression E1 is sequenced before the expression E2. 14039 if (SemaRef.getLangOpts().CPlusPlus17) 14040 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 14041 else { 14042 Visit(ASE->getLHS()); 14043 Visit(ASE->getRHS()); 14044 } 14045 } 14046 14047 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14048 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 14049 void VisitBinPtrMem(const BinaryOperator *BO) { 14050 // C++17 [expr.mptr.oper]p4: 14051 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 14052 // the expression E1 is sequenced before the expression E2. 
14053 if (SemaRef.getLangOpts().CPlusPlus17) 14054 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14055 else { 14056 Visit(BO->getLHS()); 14057 Visit(BO->getRHS()); 14058 } 14059 } 14060 14061 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14062 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 14063 void VisitBinShlShr(const BinaryOperator *BO) { 14064 // C++17 [expr.shift]p4: 14065 // The expression E1 is sequenced before the expression E2. 14066 if (SemaRef.getLangOpts().CPlusPlus17) 14067 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14068 else { 14069 Visit(BO->getLHS()); 14070 Visit(BO->getRHS()); 14071 } 14072 } 14073 14074 void VisitBinComma(const BinaryOperator *BO) { 14075 // C++11 [expr.comma]p1: 14076 // Every value computation and side effect associated with the left 14077 // expression is sequenced before every value computation and side 14078 // effect associated with the right expression. 14079 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 14080 } 14081 14082 void VisitBinAssign(const BinaryOperator *BO) { 14083 SequenceTree::Seq RHSRegion; 14084 SequenceTree::Seq LHSRegion; 14085 if (SemaRef.getLangOpts().CPlusPlus17) { 14086 RHSRegion = Tree.allocate(Region); 14087 LHSRegion = Tree.allocate(Region); 14088 } else { 14089 RHSRegion = Region; 14090 LHSRegion = Region; 14091 } 14092 SequenceTree::Seq OldRegion = Region; 14093 14094 // C++11 [expr.ass]p1: 14095 // [...] the assignment is sequenced after the value computation 14096 // of the right and left operands, [...] 14097 // 14098 // so check it before inspecting the operands and update the 14099 // map afterwards. 14100 Object O = getObject(BO->getLHS(), /*Mod=*/true); 14101 if (O) 14102 notePreMod(O, BO); 14103 14104 if (SemaRef.getLangOpts().CPlusPlus17) { 14105 // C++17 [expr.ass]p1: 14106 // [...] The right operand is sequenced before the left operand. [...] 14107 { 14108 SequencedSubexpression SeqBefore(*this); 14109 Region = RHSRegion; 14110 Visit(BO->getRHS()); 14111 } 14112 14113 Region = LHSRegion; 14114 Visit(BO->getLHS()); 14115 14116 if (O && isa<CompoundAssignOperator>(BO)) 14117 notePostUse(O, BO); 14118 14119 } else { 14120 // C++11 does not specify any sequencing between the LHS and RHS. 14121 Region = LHSRegion; 14122 Visit(BO->getLHS()); 14123 14124 if (O && isa<CompoundAssignOperator>(BO)) 14125 notePostUse(O, BO); 14126 14127 Region = RHSRegion; 14128 Visit(BO->getRHS()); 14129 } 14130 14131 // C++11 [expr.ass]p1: 14132 // the assignment is sequenced [...] before the value computation of the 14133 // assignment expression. 14134 // C11 6.5.16/3 has no such rule. 14135 Region = OldRegion; 14136 if (O) 14137 notePostMod(O, BO, 14138 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 14139 : UK_ModAsSideEffect); 14140 if (SemaRef.getLangOpts().CPlusPlus17) { 14141 Tree.merge(RHSRegion); 14142 Tree.merge(LHSRegion); 14143 } 14144 } 14145 14146 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 14147 VisitBinAssign(CAO); 14148 } 14149 14150 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14151 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14152 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14153 Object O = getObject(UO->getSubExpr(), true); 14154 if (!O) 14155 return VisitExpr(UO); 14156 14157 notePreMod(O, UO); 14158 Visit(UO->getSubExpr()); 14159 // C++11 [expr.pre.incr]p1: 14160 // the expression ++x is equivalent to x+=1 14161 notePostMod(O, UO, 14162 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14163 : UK_ModAsSideEffect); 14164 } 14165 14166 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14167 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14168 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14169 Object O = getObject(UO->getSubExpr(), true); 14170 if (!O) 14171 return VisitExpr(UO); 14172 14173 notePreMod(O, UO); 14174 Visit(UO->getSubExpr()); 14175 notePostMod(O, UO, UK_ModAsSideEffect); 14176 } 14177 14178 void VisitBinLOr(const BinaryOperator *BO) { 14179 // C++11 [expr.log.or]p2: 14180 // If the second expression is evaluated, every value computation and 14181 // side effect associated with the first expression is sequenced before 14182 // every value computation and side effect associated with the 14183 // second expression. 14184 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14185 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14186 SequenceTree::Seq OldRegion = Region; 14187 14188 EvaluationTracker Eval(*this); 14189 { 14190 SequencedSubexpression Sequenced(*this); 14191 Region = LHSRegion; 14192 Visit(BO->getLHS()); 14193 } 14194 14195 // C++11 [expr.log.or]p1: 14196 // [...] the second operand is not evaluated if the first operand 14197 // evaluates to true. 14198 bool EvalResult = false; 14199 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14200 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14201 if (ShouldVisitRHS) { 14202 Region = RHSRegion; 14203 Visit(BO->getRHS()); 14204 } 14205 14206 Region = OldRegion; 14207 Tree.merge(LHSRegion); 14208 Tree.merge(RHSRegion); 14209 } 14210 14211 void VisitBinLAnd(const BinaryOperator *BO) { 14212 // C++11 [expr.log.and]p2: 14213 // If the second expression is evaluated, every value computation and 14214 // side effect associated with the first expression is sequenced before 14215 // every value computation and side effect associated with the 14216 // second expression. 14217 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14218 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14219 SequenceTree::Seq OldRegion = Region; 14220 14221 EvaluationTracker Eval(*this); 14222 { 14223 SequencedSubexpression Sequenced(*this); 14224 Region = LHSRegion; 14225 Visit(BO->getLHS()); 14226 } 14227 14228 // C++11 [expr.log.and]p1: 14229 // [...] the second operand is not evaluated if the first operand is false. 
14230 bool EvalResult = false;
14231 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14232 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
14233 if (ShouldVisitRHS) {
14234 Region = RHSRegion;
14235 Visit(BO->getRHS());
14236 }
14237
14238 Region = OldRegion;
14239 Tree.merge(LHSRegion);
14240 Tree.merge(RHSRegion);
14241 }
14242
14243 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
14244 // C++11 [expr.cond]p1:
14245 // [...] Every value computation and side effect associated with the first
14246 // expression is sequenced before every value computation and side effect
14247 // associated with the second or third expression.
14248 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
14249
14250 // No sequencing is specified between the true and false expression.
14251 // However, since exactly one of the two is going to be evaluated we can
14252 // consider them to be sequenced. This is needed to avoid warning on
14253 // something like "x ? y += 1 : y += 2;" in the case where we will visit
14254 // both the true and false expressions because we can't evaluate x.
14255 // This will still allow us to detect an expression like (pre C++17)
14256 // "(x ? y += 1 : y += 2) = y".
14257 //
14258 // We don't wrap the visitation of the true and false expression with
14259 // SequencedSubexpression because we don't want to downgrade modifications
14260 // as side effect in the true and false expressions after the visitation
14261 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
14262 // not warn between the two "y++", but we should warn between the "y++"
14263 // and the "y".)
14264 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
14265 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
14266 SequenceTree::Seq OldRegion = Region;
14267
14268 EvaluationTracker Eval(*this);
14269 {
14270 SequencedSubexpression Sequenced(*this);
14271 Region = ConditionRegion;
14272 Visit(CO->getCond());
14273 }
14274
14275 // C++11 [expr.cond]p1:
14276 // [...] The first expression is contextually converted to bool (Clause 4).
14277 // It is evaluated and if it is true, the result of the conditional
14278 // expression is the value of the second expression, otherwise that of the
14279 // third expression. Only one of the second and third expressions is
14280 // evaluated. [...]
14281 bool EvalResult = false;
14282 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
14283 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
14284 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
14285 if (ShouldVisitTrueExpr) {
14286 Region = TrueRegion;
14287 Visit(CO->getTrueExpr());
14288 }
14289 if (ShouldVisitFalseExpr) {
14290 Region = FalseRegion;
14291 Visit(CO->getFalseExpr());
14292 }
14293
14294 Region = OldRegion;
14295 Tree.merge(ConditionRegion);
14296 Tree.merge(TrueRegion);
14297 Tree.merge(FalseRegion);
14298 }
14299
14300 void VisitCallExpr(const CallExpr *CE) {
14301 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
14302
14303 if (CE->isUnevaluatedBuiltinCall(Context))
14304 return;
14305
14306 // C++11 [intro.execution]p15:
14307 // When calling a function [...], every value computation and side effect
14308 // associated with any argument expression, or with the postfix expression
14309 // designating the called function, is sequenced before execution of every
14310 // expression or statement in the body of the function [and thus before
14311 // the value computation of its result].
14312 SequencedSubexpression Sequenced(*this); 14313 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 14314 // C++17 [expr.call]p5 14315 // The postfix-expression is sequenced before each expression in the 14316 // expression-list and any default argument. [...] 14317 SequenceTree::Seq CalleeRegion; 14318 SequenceTree::Seq OtherRegion; 14319 if (SemaRef.getLangOpts().CPlusPlus17) { 14320 CalleeRegion = Tree.allocate(Region); 14321 OtherRegion = Tree.allocate(Region); 14322 } else { 14323 CalleeRegion = Region; 14324 OtherRegion = Region; 14325 } 14326 SequenceTree::Seq OldRegion = Region; 14327 14328 // Visit the callee expression first. 14329 Region = CalleeRegion; 14330 if (SemaRef.getLangOpts().CPlusPlus17) { 14331 SequencedSubexpression Sequenced(*this); 14332 Visit(CE->getCallee()); 14333 } else { 14334 Visit(CE->getCallee()); 14335 } 14336 14337 // Then visit the argument expressions. 14338 Region = OtherRegion; 14339 for (const Expr *Argument : CE->arguments()) 14340 Visit(Argument); 14341 14342 Region = OldRegion; 14343 if (SemaRef.getLangOpts().CPlusPlus17) { 14344 Tree.merge(CalleeRegion); 14345 Tree.merge(OtherRegion); 14346 } 14347 }); 14348 } 14349 14350 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 14351 // C++17 [over.match.oper]p2: 14352 // [...] the operator notation is first transformed to the equivalent 14353 // function-call notation as summarized in Table 12 (where @ denotes one 14354 // of the operators covered in the specified subclause). However, the 14355 // operands are sequenced in the order prescribed for the built-in 14356 // operator (Clause 8). 14357 // 14358 // From the above only overloaded binary operators and overloaded call 14359 // operators have sequencing rules in C++17 that we need to handle 14360 // separately. 14361 if (!SemaRef.getLangOpts().CPlusPlus17 || 14362 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 14363 return VisitCallExpr(CXXOCE); 14364 14365 enum { 14366 NoSequencing, 14367 LHSBeforeRHS, 14368 RHSBeforeLHS, 14369 LHSBeforeRest 14370 } SequencingKind; 14371 switch (CXXOCE->getOperator()) { 14372 case OO_Equal: 14373 case OO_PlusEqual: 14374 case OO_MinusEqual: 14375 case OO_StarEqual: 14376 case OO_SlashEqual: 14377 case OO_PercentEqual: 14378 case OO_CaretEqual: 14379 case OO_AmpEqual: 14380 case OO_PipeEqual: 14381 case OO_LessLessEqual: 14382 case OO_GreaterGreaterEqual: 14383 SequencingKind = RHSBeforeLHS; 14384 break; 14385 14386 case OO_LessLess: 14387 case OO_GreaterGreater: 14388 case OO_AmpAmp: 14389 case OO_PipePipe: 14390 case OO_Comma: 14391 case OO_ArrowStar: 14392 case OO_Subscript: 14393 SequencingKind = LHSBeforeRHS; 14394 break; 14395 14396 case OO_Call: 14397 SequencingKind = LHSBeforeRest; 14398 break; 14399 14400 default: 14401 SequencingKind = NoSequencing; 14402 break; 14403 } 14404 14405 if (SequencingKind == NoSequencing) 14406 return VisitCallExpr(CXXOCE); 14407 14408 // This is a call, so all subexpressions are sequenced before the result. 
14409 SequencedSubexpression Sequenced(*this); 14410 14411 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 14412 assert(SemaRef.getLangOpts().CPlusPlus17 && 14413 "Should only get there with C++17 and above!"); 14414 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 14415 "Should only get there with an overloaded binary operator" 14416 " or an overloaded call operator!"); 14417 14418 if (SequencingKind == LHSBeforeRest) { 14419 assert(CXXOCE->getOperator() == OO_Call && 14420 "We should only have an overloaded call operator here!"); 14421 14422 // This is very similar to VisitCallExpr, except that we only have the 14423 // C++17 case. The postfix-expression is the first argument of the 14424 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 14425 // are in the following arguments. 14426 // 14427 // Note that we intentionally do not visit the callee expression since 14428 // it is just a decayed reference to a function. 14429 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 14430 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 14431 SequenceTree::Seq OldRegion = Region; 14432 14433 assert(CXXOCE->getNumArgs() >= 1 && 14434 "An overloaded call operator must have at least one argument" 14435 " for the postfix-expression!"); 14436 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 14437 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 14438 CXXOCE->getNumArgs() - 1); 14439 14440 // Visit the postfix-expression first. 14441 { 14442 Region = PostfixExprRegion; 14443 SequencedSubexpression Sequenced(*this); 14444 Visit(PostfixExpr); 14445 } 14446 14447 // Then visit the argument expressions. 14448 Region = ArgsRegion; 14449 for (const Expr *Arg : Args) 14450 Visit(Arg); 14451 14452 Region = OldRegion; 14453 Tree.merge(PostfixExprRegion); 14454 Tree.merge(ArgsRegion); 14455 } else { 14456 assert(CXXOCE->getNumArgs() == 2 && 14457 "Should only have two arguments here!"); 14458 assert((SequencingKind == LHSBeforeRHS || 14459 SequencingKind == RHSBeforeLHS) && 14460 "Unexpected sequencing kind!"); 14461 14462 // We do not visit the callee expression since it is just a decayed 14463 // reference to a function. 14464 const Expr *E1 = CXXOCE->getArg(0); 14465 const Expr *E2 = CXXOCE->getArg(1); 14466 if (SequencingKind == RHSBeforeLHS) 14467 std::swap(E1, E2); 14468 14469 return VisitSequencedExpressions(E1, E2); 14470 } 14471 }); 14472 } 14473 14474 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14475 // This is a call, so all subexpressions are sequenced before the result. 14476 SequencedSubexpression Sequenced(*this); 14477 14478 if (!CCE->isListInitialization()) 14479 return VisitExpr(CCE); 14480 14481 // In C++11, list initializations are sequenced. 14482 SmallVector<SequenceTree::Seq, 32> Elts; 14483 SequenceTree::Seq Parent = Region; 14484 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14485 E = CCE->arg_end(); 14486 I != E; ++I) { 14487 Region = Tree.allocate(Parent); 14488 Elts.push_back(Region); 14489 Visit(*I); 14490 } 14491 14492 // Forget that the initializers are sequenced. 14493 Region = Parent; 14494 for (unsigned I = 0; I < Elts.size(); ++I) 14495 Tree.merge(Elts[I]); 14496 } 14497 14498 void VisitInitListExpr(const InitListExpr *ILE) { 14499 if (!SemaRef.getLangOpts().CPlusPlus11) 14500 return VisitExpr(ILE); 14501 14502 // In C++11, list initializations are sequenced. 
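// (Illustrative: "int A[] = {I++, I++};" is therefore not flagged in C++11,
// because each initializer-clause is sequenced before the ones that follow.)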
14503 SmallVector<SequenceTree::Seq, 32> Elts; 14504 SequenceTree::Seq Parent = Region; 14505 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14506 const Expr *E = ILE->getInit(I); 14507 if (!E) 14508 continue; 14509 Region = Tree.allocate(Parent); 14510 Elts.push_back(Region); 14511 Visit(E); 14512 } 14513 14514 // Forget that the initializers are sequenced. 14515 Region = Parent; 14516 for (unsigned I = 0; I < Elts.size(); ++I) 14517 Tree.merge(Elts[I]); 14518 } 14519 }; 14520 14521 } // namespace 14522 14523 void Sema::CheckUnsequencedOperations(const Expr *E) { 14524 SmallVector<const Expr *, 8> WorkList; 14525 WorkList.push_back(E); 14526 while (!WorkList.empty()) { 14527 const Expr *Item = WorkList.pop_back_val(); 14528 SequenceChecker(*this, Item, WorkList); 14529 } 14530 } 14531 14532 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14533 bool IsConstexpr) { 14534 llvm::SaveAndRestore<bool> ConstantContext( 14535 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14536 CheckImplicitConversions(E, CheckLoc); 14537 if (!E->isInstantiationDependent()) 14538 CheckUnsequencedOperations(E); 14539 if (!IsConstexpr && !E->isValueDependent()) 14540 CheckForIntOverflow(E); 14541 DiagnoseMisalignedMembers(); 14542 } 14543 14544 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14545 FieldDecl *BitField, 14546 Expr *Init) { 14547 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14548 } 14549 14550 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14551 SourceLocation Loc) { 14552 if (!PType->isVariablyModifiedType()) 14553 return; 14554 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14555 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14556 return; 14557 } 14558 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14559 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14560 return; 14561 } 14562 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14563 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14564 return; 14565 } 14566 14567 const ArrayType *AT = S.Context.getAsArrayType(PType); 14568 if (!AT) 14569 return; 14570 14571 if (AT->getSizeModifier() != ArrayType::Star) { 14572 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 14573 return; 14574 } 14575 14576 S.Diag(Loc, diag::err_array_star_in_function_definition); 14577 } 14578 14579 /// CheckParmsForFunctionDef - Check that the parameters of the given 14580 /// function are appropriate for the definition of a function. This 14581 /// takes care of any checks that cannot be performed on the 14582 /// declaration itself, e.g., that the types of each of the function 14583 /// parameters are complete. 14584 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14585 bool CheckParameterNames) { 14586 bool HasInvalidParm = false; 14587 for (ParmVarDecl *Param : Parameters) { 14588 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14589 // function declarator that is part of a function definition of 14590 // that function shall not have incomplete type. 14591 // 14592 // This is also C++ [dcl.fct]p6. 
14593 if (!Param->isInvalidDecl() && 14594 RequireCompleteType(Param->getLocation(), Param->getType(), 14595 diag::err_typecheck_decl_incomplete_type)) { 14596 Param->setInvalidDecl(); 14597 HasInvalidParm = true; 14598 } 14599 14600 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14601 // declaration of each parameter shall include an identifier. 14602 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14603 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14604 // Diagnose this as an extension in C17 and earlier. 14605 if (!getLangOpts().C2x) 14606 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14607 } 14608 14609 // C99 6.7.5.3p12: 14610 // If the function declarator is not part of a definition of that 14611 // function, parameters may have incomplete type and may use the [*] 14612 // notation in their sequences of declarator specifiers to specify 14613 // variable length array types. 14614 QualType PType = Param->getOriginalType(); 14615 // FIXME: This diagnostic should point the '[*]' if source-location 14616 // information is added for it. 14617 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14618 14619 // If the parameter is a c++ class type and it has to be destructed in the 14620 // callee function, declare the destructor so that it can be called by the 14621 // callee function. Do not perform any direct access check on the dtor here. 14622 if (!Param->isInvalidDecl()) { 14623 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14624 if (!ClassDecl->isInvalidDecl() && 14625 !ClassDecl->hasIrrelevantDestructor() && 14626 !ClassDecl->isDependentContext() && 14627 ClassDecl->isParamDestroyedInCallee()) { 14628 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14629 MarkFunctionReferenced(Param->getLocation(), Destructor); 14630 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14631 } 14632 } 14633 } 14634 14635 // Parameters with the pass_object_size attribute only need to be marked 14636 // constant at function definitions. Because we lack information about 14637 // whether we're on a declaration or definition when we're instantiating the 14638 // attribute, we need to check for constness here. 14639 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14640 if (!Param->getType().isConstQualified()) 14641 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14642 << Attr->getSpelling() << 1; 14643 14644 // Check for parameter names shadowing fields from the class. 14645 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14646 // The owning context for the parameter should be the function, but we 14647 // want to see if this function's declaration context is a record. 14648 DeclContext *DC = Param->getDeclContext(); 14649 if (DC && DC->isFunctionOrMethod()) { 14650 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14651 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14652 RD, /*DeclIsField*/ false); 14653 } 14654 } 14655 } 14656 14657 return HasInvalidParm; 14658 } 14659 14660 Optional<std::pair<CharUnits, CharUnits>> 14661 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14662 14663 /// Compute the alignment and offset of the base class object given the 14664 /// derived-to-base cast expression and the alignment and offset of the derived 14665 /// class object. 
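/// For illustration (hypothetical types): for a cast from "Derived *" to
/// "Base *" where the Derived object is known to be 16-byte aligned at
/// offset 0 and the non-virtual Base subobject lives at offset 8 within
/// Derived, the result is the pair (16 bytes, 8 bytes).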
14666 static std::pair<CharUnits, CharUnits> 14667 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14668 CharUnits BaseAlignment, CharUnits Offset, 14669 ASTContext &Ctx) { 14670 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14671 ++PathI) { 14672 const CXXBaseSpecifier *Base = *PathI; 14673 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14674 if (Base->isVirtual()) { 14675 // The complete object may have a lower alignment than the non-virtual 14676 // alignment of the base, in which case the base may be misaligned. Choose 14677 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14678 // conservative lower bound of the complete object alignment. 14679 CharUnits NonVirtualAlignment = 14680 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14681 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14682 Offset = CharUnits::Zero(); 14683 } else { 14684 const ASTRecordLayout &RL = 14685 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14686 Offset += RL.getBaseClassOffset(BaseDecl); 14687 } 14688 DerivedType = Base->getType(); 14689 } 14690 14691 return std::make_pair(BaseAlignment, Offset); 14692 } 14693 14694 /// Compute the alignment and offset of a binary additive operator. 14695 static Optional<std::pair<CharUnits, CharUnits>> 14696 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 14697 bool IsSub, ASTContext &Ctx) { 14698 QualType PointeeType = PtrE->getType()->getPointeeType(); 14699 14700 if (!PointeeType->isConstantSizeType()) 14701 return llvm::None; 14702 14703 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 14704 14705 if (!P) 14706 return llvm::None; 14707 14708 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 14709 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 14710 CharUnits Offset = EltSize * IdxRes->getExtValue(); 14711 if (IsSub) 14712 Offset = -Offset; 14713 return std::make_pair(P->first, P->second + Offset); 14714 } 14715 14716 // If the integer expression isn't a constant expression, compute the lower 14717 // bound of the alignment using the alignment and offset of the pointer 14718 // expression and the element size. 14719 return std::make_pair( 14720 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 14721 CharUnits::Zero()); 14722 } 14723 14724 /// This helper function takes an lvalue expression and returns the alignment of 14725 /// a VarDecl and a constant offset from the VarDecl. 
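/// For illustration (hypothetical declarations): for "S.Buf[2]", where "S" is
/// a VarDecl aligned to 8 bytes and "Buf" is an array of 4-byte ints placed
/// at offset 4 within "S", this returns the pair (8 bytes, 12 bytes).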
14726 Optional<std::pair<CharUnits, CharUnits>> 14727 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 14728 E = E->IgnoreParens(); 14729 switch (E->getStmtClass()) { 14730 default: 14731 break; 14732 case Stmt::CStyleCastExprClass: 14733 case Stmt::CXXStaticCastExprClass: 14734 case Stmt::ImplicitCastExprClass: { 14735 auto *CE = cast<CastExpr>(E); 14736 const Expr *From = CE->getSubExpr(); 14737 switch (CE->getCastKind()) { 14738 default: 14739 break; 14740 case CK_NoOp: 14741 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14742 case CK_UncheckedDerivedToBase: 14743 case CK_DerivedToBase: { 14744 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14745 if (!P) 14746 break; 14747 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 14748 P->second, Ctx); 14749 } 14750 } 14751 break; 14752 } 14753 case Stmt::ArraySubscriptExprClass: { 14754 auto *ASE = cast<ArraySubscriptExpr>(E); 14755 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 14756 false, Ctx); 14757 } 14758 case Stmt::DeclRefExprClass: { 14759 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 14760 // FIXME: If VD is captured by copy or is an escaping __block variable, 14761 // use the alignment of VD's type. 14762 if (!VD->getType()->isReferenceType()) 14763 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 14764 if (VD->hasInit()) 14765 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 14766 } 14767 break; 14768 } 14769 case Stmt::MemberExprClass: { 14770 auto *ME = cast<MemberExpr>(E); 14771 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 14772 if (!FD || FD->getType()->isReferenceType() || 14773 FD->getParent()->isInvalidDecl()) 14774 break; 14775 Optional<std::pair<CharUnits, CharUnits>> P; 14776 if (ME->isArrow()) 14777 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 14778 else 14779 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 14780 if (!P) 14781 break; 14782 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 14783 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 14784 return std::make_pair(P->first, 14785 P->second + CharUnits::fromQuantity(Offset)); 14786 } 14787 case Stmt::UnaryOperatorClass: { 14788 auto *UO = cast<UnaryOperator>(E); 14789 switch (UO->getOpcode()) { 14790 default: 14791 break; 14792 case UO_Deref: 14793 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 14794 } 14795 break; 14796 } 14797 case Stmt::BinaryOperatorClass: { 14798 auto *BO = cast<BinaryOperator>(E); 14799 auto Opcode = BO->getOpcode(); 14800 switch (Opcode) { 14801 default: 14802 break; 14803 case BO_Comma: 14804 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 14805 } 14806 break; 14807 } 14808 } 14809 return llvm::None; 14810 } 14811 14812 /// This helper function takes a pointer expression and returns the alignment of 14813 /// a VarDecl and a constant offset from the VarDecl. 
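/// For illustration (hypothetical declarations): for "&V + 1", where "V" is a
/// VarDecl of a 4-byte type aligned to 4 bytes, this returns the pair
/// (4 bytes, 4 bytes).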
14814 Optional<std::pair<CharUnits, CharUnits>> 14815 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 14816 E = E->IgnoreParens(); 14817 switch (E->getStmtClass()) { 14818 default: 14819 break; 14820 case Stmt::CStyleCastExprClass: 14821 case Stmt::CXXStaticCastExprClass: 14822 case Stmt::ImplicitCastExprClass: { 14823 auto *CE = cast<CastExpr>(E); 14824 const Expr *From = CE->getSubExpr(); 14825 switch (CE->getCastKind()) { 14826 default: 14827 break; 14828 case CK_NoOp: 14829 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14830 case CK_ArrayToPointerDecay: 14831 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14832 case CK_UncheckedDerivedToBase: 14833 case CK_DerivedToBase: { 14834 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14835 if (!P) 14836 break; 14837 return getDerivedToBaseAlignmentAndOffset( 14838 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 14839 } 14840 } 14841 break; 14842 } 14843 case Stmt::CXXThisExprClass: { 14844 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 14845 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 14846 return std::make_pair(Alignment, CharUnits::Zero()); 14847 } 14848 case Stmt::UnaryOperatorClass: { 14849 auto *UO = cast<UnaryOperator>(E); 14850 if (UO->getOpcode() == UO_AddrOf) 14851 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 14852 break; 14853 } 14854 case Stmt::BinaryOperatorClass: { 14855 auto *BO = cast<BinaryOperator>(E); 14856 auto Opcode = BO->getOpcode(); 14857 switch (Opcode) { 14858 default: 14859 break; 14860 case BO_Add: 14861 case BO_Sub: { 14862 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 14863 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 14864 std::swap(LHS, RHS); 14865 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 14866 Ctx); 14867 } 14868 case BO_Comma: 14869 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 14870 } 14871 break; 14872 } 14873 } 14874 return llvm::None; 14875 } 14876 14877 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 14878 // See if we can compute the alignment of a VarDecl and an offset from it. 14879 Optional<std::pair<CharUnits, CharUnits>> P = 14880 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 14881 14882 if (P) 14883 return P->first.alignmentAtOffset(P->second); 14884 14885 // If that failed, return the type's alignment. 14886 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 14887 } 14888 14889 /// CheckCastAlign - Implements -Wcast-align, which warns when a 14890 /// pointer cast increases the alignment requirements. 14891 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 14892 // This is actually a lot of work to potentially be doing on every 14893 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 14894 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 14895 return; 14896 14897 // Ignore dependent types. 14898 if (T->isDependentType() || Op->getType()->isDependentType()) 14899 return; 14900 14901 // Require that the destination be a pointer type. 14902 const PointerType *DestPtr = T->getAs<PointerType>(); 14903 if (!DestPtr) return; 14904 14905 // If the destination has alignment 1, we're done. 
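// (Illustrative: any cast whose destination pointee type is "char", and thus
// has alignment 1, can never increase alignment requirements.)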
14906 QualType DestPointee = DestPtr->getPointeeType(); 14907 if (DestPointee->isIncompleteType()) return; 14908 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 14909 if (DestAlign.isOne()) return; 14910 14911 // Require that the source be a pointer type. 14912 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 14913 if (!SrcPtr) return; 14914 QualType SrcPointee = SrcPtr->getPointeeType(); 14915 14916 // Explicitly allow casts from cv void*. We already implicitly 14917 // allowed casts to cv void*, since they have alignment 1. 14918 // Also allow casts involving incomplete types, which implicitly 14919 // includes 'void'. 14920 if (SrcPointee->isIncompleteType()) return; 14921 14922 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 14923 14924 if (SrcAlign >= DestAlign) return; 14925 14926 Diag(TRange.getBegin(), diag::warn_cast_align) 14927 << Op->getType() << T 14928 << static_cast<unsigned>(SrcAlign.getQuantity()) 14929 << static_cast<unsigned>(DestAlign.getQuantity()) 14930 << TRange << Op->getSourceRange(); 14931 } 14932 14933 /// Check whether this array fits the idiom of a size-one tail padded 14934 /// array member of a struct. 14935 /// 14936 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 14937 /// commonly used to emulate flexible arrays in C89 code. 14938 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 14939 const NamedDecl *ND) { 14940 if (Size != 1 || !ND) return false; 14941 14942 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 14943 if (!FD) return false; 14944 14945 // Don't consider sizes resulting from macro expansions or template argument 14946 // substitution to form C89 tail-padded arrays. 14947 14948 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 14949 while (TInfo) { 14950 TypeLoc TL = TInfo->getTypeLoc(); 14951 // Look through typedefs. 14952 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 14953 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 14954 TInfo = TDL->getTypeSourceInfo(); 14955 continue; 14956 } 14957 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 14958 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 14959 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 14960 return false; 14961 } 14962 break; 14963 } 14964 14965 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 14966 if (!RD) return false; 14967 if (RD->isUnion()) return false; 14968 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 14969 if (!CRD->isStandardLayout()) return false; 14970 } 14971 14972 // See if this is the last field decl in the record. 14973 const Decl *D = FD; 14974 while ((D = D->getNextDeclInContext())) 14975 if (isa<FieldDecl>(D)) 14976 return false; 14977 return true; 14978 } 14979 14980 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 14981 const ArraySubscriptExpr *ASE, 14982 bool AllowOnePastEnd, bool IndexNegated) { 14983 // Already diagnosed by the constant evaluator. 14984 if (isConstantEvaluated()) 14985 return; 14986 14987 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 14988 if (IndexExpr->isValueDependent()) 14989 return; 14990 14991 const Type *EffectiveType = 14992 BaseExpr->getType()->getPointeeOrArrayElementType(); 14993 BaseExpr = BaseExpr->IgnoreParenCasts(); 14994 const ConstantArrayType *ArrayTy = 14995 Context.getAsConstantArrayType(BaseExpr->getType()); 14996 14997 const Type *BaseType = 14998 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 14999 bool IsUnboundedArray = (BaseType == nullptr); 15000 if (EffectiveType->isDependentType() || 15001 (!IsUnboundedArray && BaseType->isDependentType())) 15002 return; 15003 15004 Expr::EvalResult Result; 15005 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 15006 return; 15007 15008 llvm::APSInt index = Result.Val.getInt(); 15009 if (IndexNegated) { 15010 index.setIsUnsigned(false); 15011 index = -index; 15012 } 15013 15014 const NamedDecl *ND = nullptr; 15015 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15016 ND = DRE->getDecl(); 15017 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 15018 ND = ME->getMemberDecl(); 15019 15020 if (IsUnboundedArray) { 15021 if (index.isUnsigned() || !index.isNegative()) { 15022 const auto &ASTC = getASTContext(); 15023 unsigned AddrBits = 15024 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 15025 EffectiveType->getCanonicalTypeInternal())); 15026 if (index.getBitWidth() < AddrBits) 15027 index = index.zext(AddrBits); 15028 Optional<CharUnits> ElemCharUnits = 15029 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 15030 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 15031 // pointer) bounds-checking isn't meaningful. 15032 if (!ElemCharUnits) 15033 return; 15034 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 15035 // If index has more active bits than address space, we already know 15036 // we have a bounds violation to warn about. Otherwise, compute 15037 // address of (index + 1)th element, and warn about bounds violation 15038 // only if that address exceeds address space. 15039 if (index.getActiveBits() <= AddrBits) { 15040 bool Overflow; 15041 llvm::APInt Product(index); 15042 Product += 1; 15043 Product = Product.umul_ov(ElemBytes, Overflow); 15044 if (!Overflow && Product.getActiveBits() <= AddrBits) 15045 return; 15046 } 15047 15048 // Need to compute max possible elements in address space, since that 15049 // is included in diag message. 15050 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 15051 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 15052 MaxElems += 1; 15053 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 15054 MaxElems = MaxElems.udiv(ElemBytes); 15055 15056 unsigned DiagID = 15057 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 15058 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 15059 15060 // Diag message shows element size in bits and in "bytes" (platform- 15061 // dependent CharUnits) 15062 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15063 PDiag(DiagID) 15064 << toString(index, 10, true) << AddrBits 15065 << (unsigned)ASTC.toBits(*ElemCharUnits) 15066 << toString(ElemBytes, 10, false) 15067 << toString(MaxElems, 10, false) 15068 << (unsigned)MaxElems.getLimitedValue(~0U) 15069 << IndexExpr->getSourceRange()); 15070 15071 if (!ND) { 15072 // Try harder to find a NamedDecl to point at in the note. 
15073 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15074 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15075 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15076 ND = DRE->getDecl(); 15077 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15078 ND = ME->getMemberDecl(); 15079 } 15080 15081 if (ND) 15082 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15083 PDiag(diag::note_array_declared_here) << ND); 15084 } 15085 return; 15086 } 15087 15088 if (index.isUnsigned() || !index.isNegative()) { 15089 // It is possible that the type of the base expression after 15090 // IgnoreParenCasts is incomplete, even though the type of the base 15091 // expression before IgnoreParenCasts is complete (see PR39746 for an 15092 // example). In this case we have no information about whether the array 15093 // access exceeds the array bounds. However we can still diagnose an array 15094 // access which precedes the array bounds. 15095 if (BaseType->isIncompleteType()) 15096 return; 15097 15098 llvm::APInt size = ArrayTy->getSize(); 15099 if (!size.isStrictlyPositive()) 15100 return; 15101 15102 if (BaseType != EffectiveType) { 15103 // Make sure we're comparing apples to apples when comparing index to size 15104 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 15105 uint64_t array_typesize = Context.getTypeSize(BaseType); 15106 // Handle ptrarith_typesize being zero, such as when casting to void* 15107 if (!ptrarith_typesize) ptrarith_typesize = 1; 15108 if (ptrarith_typesize != array_typesize) { 15109 // There's a cast to a different size type involved 15110 uint64_t ratio = array_typesize / ptrarith_typesize; 15111 // TODO: Be smarter about handling cases where array_typesize is not a 15112 // multiple of ptrarith_typesize 15113 if (ptrarith_typesize * ratio == array_typesize) 15114 size *= llvm::APInt(size.getBitWidth(), ratio); 15115 } 15116 } 15117 15118 if (size.getBitWidth() > index.getBitWidth()) 15119 index = index.zext(size.getBitWidth()); 15120 else if (size.getBitWidth() < index.getBitWidth()) 15121 size = size.zext(index.getBitWidth()); 15122 15123 // For array subscripting the index must be less than size, but for pointer 15124 // arithmetic also allow the index (offset) to be equal to size since 15125 // computing the next address after the end of the array is legal and 15126 // commonly done e.g. in C++ iterators and range-based for loops. 15127 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 15128 return; 15129 15130 // Also don't warn for arrays of size 1 which are members of some 15131 // structure. These are often used to approximate flexible arrays in C89 15132 // code. 15133 if (IsTailPaddedMemberArray(*this, size, ND)) 15134 return; 15135 15136 // Suppress the warning if the subscript expression (as identified by the 15137 // ']' location) and the index expression are both from macro expansions 15138 // within a system header. 15139 if (ASE) { 15140 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 15141 ASE->getRBracketLoc()); 15142 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 15143 SourceLocation IndexLoc = 15144 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 15145 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 15146 return; 15147 } 15148 } 15149 15150 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15151 : diag::warn_ptr_arith_exceeds_bounds; 15152 15153 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15154 PDiag(DiagID) << toString(index, 10, true) 15155 << toString(size, 10, true) 15156 << (unsigned)size.getLimitedValue(~0U) 15157 << IndexExpr->getSourceRange()); 15158 } else { 15159 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15160 if (!ASE) { 15161 DiagID = diag::warn_ptr_arith_precedes_bounds; 15162 if (index.isNegative()) index = -index; 15163 } 15164 15165 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15166 PDiag(DiagID) << toString(index, 10, true) 15167 << IndexExpr->getSourceRange()); 15168 } 15169 15170 if (!ND) { 15171 // Try harder to find a NamedDecl to point at in the note. 15172 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15173 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15174 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15175 ND = DRE->getDecl(); 15176 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15177 ND = ME->getMemberDecl(); 15178 } 15179 15180 if (ND) 15181 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15182 PDiag(diag::note_array_declared_here) << ND); 15183 } 15184 15185 void Sema::CheckArrayAccess(const Expr *expr) { 15186 int AllowOnePastEnd = 0; 15187 while (expr) { 15188 expr = expr->IgnoreParenImpCasts(); 15189 switch (expr->getStmtClass()) { 15190 case Stmt::ArraySubscriptExprClass: { 15191 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15192 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15193 AllowOnePastEnd > 0); 15194 expr = ASE->getBase(); 15195 break; 15196 } 15197 case Stmt::MemberExprClass: { 15198 expr = cast<MemberExpr>(expr)->getBase(); 15199 break; 15200 } 15201 case Stmt::OMPArraySectionExprClass: { 15202 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15203 if (ASE->getLowerBound()) 15204 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15205 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15206 return; 15207 } 15208 case Stmt::UnaryOperatorClass: { 15209 // Only unwrap the * and & unary operators 15210 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15211 expr = UO->getSubExpr(); 15212 switch (UO->getOpcode()) { 15213 case UO_AddrOf: 15214 AllowOnePastEnd++; 15215 break; 15216 case UO_Deref: 15217 AllowOnePastEnd--; 15218 break; 15219 default: 15220 return; 15221 } 15222 break; 15223 } 15224 case Stmt::ConditionalOperatorClass: { 15225 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15226 if (const Expr *lhs = cond->getLHS()) 15227 CheckArrayAccess(lhs); 15228 if (const Expr *rhs = cond->getRHS()) 15229 CheckArrayAccess(rhs); 15230 return; 15231 } 15232 case Stmt::CXXOperatorCallExprClass: { 15233 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15234 for (const auto *Arg : OCE->arguments()) 15235 CheckArrayAccess(Arg); 15236 return; 15237 } 15238 default: 15239 return; 15240 } 15241 } 15242 } 15243 15244 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15245 15246 namespace { 15247 15248 struct RetainCycleOwner { 15249 VarDecl *Variable = nullptr; 15250 SourceRange Range; 15251 SourceLocation Loc; 15252 bool Indirect = false; 15253 15254 RetainCycleOwner() = default; 15255 15256 void setLocsFrom(Expr *e) { 15257 Loc = e->getExprLoc(); 15258 Range = e->getSourceRange(); 15259 } 15260 }; 15261 15262 } // namespace 15263 15264 /// Consider whether capturing the given variable can possibly lead to 15265 /// a retain cycle. 
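///
/// An illustrative sketch (not from the original source), under ARC:
///   self.completion = ^{ [self finish]; };
/// Here 'self' has __strong lifetime, so the block captures it strongly while
/// the object retains the block through its 'completion' property, which is
/// likely to form a retain cycle.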
15266 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 15267 // In ARC, it's captured strongly iff the variable has __strong 15268 // lifetime. In MRR, it's captured strongly if the variable is 15269 // __block and has an appropriate type. 15270 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15271 return false; 15272 15273 owner.Variable = var; 15274 if (ref) 15275 owner.setLocsFrom(ref); 15276 return true; 15277 } 15278 15279 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 15280 while (true) { 15281 e = e->IgnoreParens(); 15282 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 15283 switch (cast->getCastKind()) { 15284 case CK_BitCast: 15285 case CK_LValueBitCast: 15286 case CK_LValueToRValue: 15287 case CK_ARCReclaimReturnedObject: 15288 e = cast->getSubExpr(); 15289 continue; 15290 15291 default: 15292 return false; 15293 } 15294 } 15295 15296 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 15297 ObjCIvarDecl *ivar = ref->getDecl(); 15298 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15299 return false; 15300 15301 // Try to find a retain cycle in the base. 15302 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 15303 return false; 15304 15305 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 15306 owner.Indirect = true; 15307 return true; 15308 } 15309 15310 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 15311 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 15312 if (!var) return false; 15313 return considerVariable(var, ref, owner); 15314 } 15315 15316 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 15317 if (member->isArrow()) return false; 15318 15319 // Don't count this as an indirect ownership. 15320 e = member->getBase(); 15321 continue; 15322 } 15323 15324 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 15325 // Only pay attention to pseudo-objects on property references. 15326 ObjCPropertyRefExpr *pre 15327 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 15328 ->IgnoreParens()); 15329 if (!pre) return false; 15330 if (pre->isImplicitProperty()) return false; 15331 ObjCPropertyDecl *property = pre->getExplicitProperty(); 15332 if (!property->isRetaining() && 15333 !(property->getPropertyIvarDecl() && 15334 property->getPropertyIvarDecl()->getType() 15335 .getObjCLifetime() == Qualifiers::OCL_Strong)) 15336 return false; 15337 15338 owner.Indirect = true; 15339 if (pre->isSuperReceiver()) { 15340 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 15341 if (!owner.Variable) 15342 return false; 15343 owner.Loc = pre->getLocation(); 15344 owner.Range = pre->getSourceRange(); 15345 return true; 15346 } 15347 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 15348 ->getSourceExpr()); 15349 continue; 15350 } 15351 15352 // Array ivars? 
15353 15354 return false; 15355 } 15356 } 15357 15358 namespace { 15359 15360 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 15361 ASTContext &Context; 15362 VarDecl *Variable; 15363 Expr *Capturer = nullptr; 15364 bool VarWillBeReased = false; 15365 15366 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 15367 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 15368 Context(Context), Variable(variable) {} 15369 15370 void VisitDeclRefExpr(DeclRefExpr *ref) { 15371 if (ref->getDecl() == Variable && !Capturer) 15372 Capturer = ref; 15373 } 15374 15375 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 15376 if (Capturer) return; 15377 Visit(ref->getBase()); 15378 if (Capturer && ref->isFreeIvar()) 15379 Capturer = ref; 15380 } 15381 15382 void VisitBlockExpr(BlockExpr *block) { 15383 // Look inside nested blocks 15384 if (block->getBlockDecl()->capturesVariable(Variable)) 15385 Visit(block->getBlockDecl()->getBody()); 15386 } 15387 15388 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 15389 if (Capturer) return; 15390 if (OVE->getSourceExpr()) 15391 Visit(OVE->getSourceExpr()); 15392 } 15393 15394 void VisitBinaryOperator(BinaryOperator *BinOp) { 15395 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 15396 return; 15397 Expr *LHS = BinOp->getLHS(); 15398 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 15399 if (DRE->getDecl() != Variable) 15400 return; 15401 if (Expr *RHS = BinOp->getRHS()) { 15402 RHS = RHS->IgnoreParenCasts(); 15403 Optional<llvm::APSInt> Value; 15404 VarWillBeReased = 15405 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 15406 *Value == 0); 15407 } 15408 } 15409 } 15410 }; 15411 15412 } // namespace 15413 15414 /// Check whether the given argument is a block which captures a 15415 /// variable. 15416 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 15417 assert(owner.Variable && owner.Loc.isValid()); 15418 15419 e = e->IgnoreParenCasts(); 15420 15421 // Look through [^{...} copy] and Block_copy(^{...}). 15422 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 15423 Selector Cmd = ME->getSelector(); 15424 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 15425 e = ME->getInstanceReceiver(); 15426 if (!e) 15427 return nullptr; 15428 e = e->IgnoreParenCasts(); 15429 } 15430 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 15431 if (CE->getNumArgs() == 1) { 15432 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 15433 if (Fn) { 15434 const IdentifierInfo *FnI = Fn->getIdentifier(); 15435 if (FnI && FnI->isStr("_Block_copy")) { 15436 e = CE->getArg(0)->IgnoreParenCasts(); 15437 } 15438 } 15439 } 15440 } 15441 15442 BlockExpr *block = dyn_cast<BlockExpr>(e); 15443 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 15444 return nullptr; 15445 15446 FindCaptureVisitor visitor(S.Context, owner.Variable); 15447 visitor.Visit(block->getBlockDecl()->getBody()); 15448 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 15449 } 15450 15451 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 15452 RetainCycleOwner &owner) { 15453 assert(capturer); 15454 assert(owner.Variable && owner.Loc.isValid()); 15455 15456 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 15457 << owner.Variable << capturer->getSourceRange(); 15458 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 15459 << owner.Indirect << owner.Range; 15460 } 15461 15462 /// Check for a keyword selector that starts with the word 'add' or 15463 /// 'set'. 15464 static bool isSetterLikeSelector(Selector sel) { 15465 if (sel.isUnarySelector()) return false; 15466 15467 StringRef str = sel.getNameForSlot(0); 15468 while (!str.empty() && str.front() == '_') str = str.substr(1); 15469 if (str.startswith("set")) 15470 str = str.substr(3); 15471 else if (str.startswith("add")) { 15472 // Specially allow 'addOperationWithBlock:'. 15473 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 15474 return false; 15475 str = str.substr(3); 15476 } 15477 else 15478 return false; 15479 15480 if (str.empty()) return true; 15481 return !isLowercase(str.front()); 15482 } 15483 15484 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 15485 ObjCMessageExpr *Message) { 15486 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 15487 Message->getReceiverInterface(), 15488 NSAPI::ClassId_NSMutableArray); 15489 if (!IsMutableArray) { 15490 return None; 15491 } 15492 15493 Selector Sel = Message->getSelector(); 15494 15495 Optional<NSAPI::NSArrayMethodKind> MKOpt = 15496 S.NSAPIObj->getNSArrayMethodKind(Sel); 15497 if (!MKOpt) { 15498 return None; 15499 } 15500 15501 NSAPI::NSArrayMethodKind MK = *MKOpt; 15502 15503 switch (MK) { 15504 case NSAPI::NSMutableArr_addObject: 15505 case NSAPI::NSMutableArr_insertObjectAtIndex: 15506 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 15507 return 0; 15508 case NSAPI::NSMutableArr_replaceObjectAtIndex: 15509 return 1; 15510 15511 default: 15512 return None; 15513 } 15514 15515 return None; 15516 } 15517 15518 static 15519 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 15520 ObjCMessageExpr *Message) { 15521 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 15522 Message->getReceiverInterface(), 15523 NSAPI::ClassId_NSMutableDictionary); 15524 if (!IsMutableDictionary) { 15525 return None; 15526 } 15527 15528 Selector Sel = Message->getSelector(); 15529 15530 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15531 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15532 if (!MKOpt) { 15533 return None; 15534 } 15535 15536 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15537 15538 switch (MK) { 15539 case NSAPI::NSMutableDict_setObjectForKey: 15540 case NSAPI::NSMutableDict_setValueForKey: 15541 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15542 return 0; 15543 15544 default: 15545 return None; 15546 } 15547 15548 return None; 15549 } 15550 15551 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15552 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15553 Message->getReceiverInterface(), 15554 NSAPI::ClassId_NSMutableSet); 15555 15556 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15557 Message->getReceiverInterface(), 15558 NSAPI::ClassId_NSMutableOrderedSet); 15559 if (!IsMutableSet && !IsMutableOrderedSet) { 15560 return None; 15561 } 15562 15563 Selector Sel = Message->getSelector(); 15564 15565 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15566 if (!MKOpt) 
{ 15567 return None; 15568 } 15569 15570 NSAPI::NSSetMethodKind MK = *MKOpt; 15571 15572 switch (MK) { 15573 case NSAPI::NSMutableSet_addObject: 15574 case NSAPI::NSOrderedSet_setObjectAtIndex: 15575 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15576 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15577 return 0; 15578 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15579 return 1; 15580 } 15581 15582 return None; 15583 } 15584 15585 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15586 if (!Message->isInstanceMessage()) { 15587 return; 15588 } 15589 15590 Optional<int> ArgOpt; 15591 15592 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15593 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15594 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15595 return; 15596 } 15597 15598 int ArgIndex = *ArgOpt; 15599 15600 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15601 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15602 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15603 } 15604 15605 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15606 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15607 if (ArgRE->isObjCSelfExpr()) { 15608 Diag(Message->getSourceRange().getBegin(), 15609 diag::warn_objc_circular_container) 15610 << ArgRE->getDecl() << StringRef("'super'"); 15611 } 15612 } 15613 } else { 15614 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15615 15616 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15617 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15618 } 15619 15620 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15621 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15622 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15623 ValueDecl *Decl = ReceiverRE->getDecl(); 15624 Diag(Message->getSourceRange().getBegin(), 15625 diag::warn_objc_circular_container) 15626 << Decl << Decl; 15627 if (!ArgRE->isObjCSelfExpr()) { 15628 Diag(Decl->getLocation(), 15629 diag::note_objc_circular_container_declared_here) 15630 << Decl; 15631 } 15632 } 15633 } 15634 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15635 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15636 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15637 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15638 Diag(Message->getSourceRange().getBegin(), 15639 diag::warn_objc_circular_container) 15640 << Decl << Decl; 15641 Diag(Decl->getLocation(), 15642 diag::note_objc_circular_container_declared_here) 15643 << Decl; 15644 } 15645 } 15646 } 15647 } 15648 } 15649 15650 /// Check a message send to see if it's likely to cause a retain cycle. 15651 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15652 // Only check instance methods whose selector looks like a setter. 15653 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15654 return; 15655 15656 // Try to find a variable that the receiver is strongly owned by. 
15657 RetainCycleOwner owner; 15658 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15659 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15660 return; 15661 } else { 15662 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15663 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15664 owner.Loc = msg->getSuperLoc(); 15665 owner.Range = msg->getSuperLoc(); 15666 } 15667 15668 // Check whether the receiver is captured by any of the arguments. 15669 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15670 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15671 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15672 // noescape blocks should not be retained by the method. 15673 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15674 continue; 15675 return diagnoseRetainCycle(*this, capturer, owner); 15676 } 15677 } 15678 } 15679 15680 /// Check a property assign to see if it's likely to cause a retain cycle. 15681 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15682 RetainCycleOwner owner; 15683 if (!findRetainCycleOwner(*this, receiver, owner)) 15684 return; 15685 15686 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15687 diagnoseRetainCycle(*this, capturer, owner); 15688 } 15689 15690 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 15691 RetainCycleOwner Owner; 15692 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 15693 return; 15694 15695 // Because we don't have an expression for the variable, we have to set the 15696 // location explicitly here. 15697 Owner.Loc = Var->getLocation(); 15698 Owner.Range = Var->getSourceRange(); 15699 15700 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 15701 diagnoseRetainCycle(*this, Capturer, Owner); 15702 } 15703 15704 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 15705 Expr *RHS, bool isProperty) { 15706 // Check if RHS is an Objective-C object literal, which also can get 15707 // immediately zapped in a weak reference. Note that we explicitly 15708 // allow ObjCStringLiterals, since those are designed to never really die. 15709 RHS = RHS->IgnoreParenImpCasts(); 15710 15711 // This enum needs to match with the 'select' in 15712 // warn_objc_arc_literal_assign (off-by-1). 15713 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 15714 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 15715 return false; 15716 15717 S.Diag(Loc, diag::warn_arc_literal_assign) 15718 << (unsigned) Kind 15719 << (isProperty ? 0 : 1) 15720 << RHS->getSourceRange(); 15721 15722 return true; 15723 } 15724 15725 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 15726 Qualifiers::ObjCLifetime LT, 15727 Expr *RHS, bool isProperty) { 15728 // Strip off any implicit cast added to get to the one ARC-specific. 15729 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15730 if (cast->getCastKind() == CK_ARCConsumeObject) { 15731 S.Diag(Loc, diag::warn_arc_retained_assign) 15732 << (LT == Qualifiers::OCL_ExplicitNone) 15733 << (isProperty ? 
0 : 1) 15734 << RHS->getSourceRange(); 15735 return true; 15736 } 15737 RHS = cast->getSubExpr(); 15738 } 15739 15740 if (LT == Qualifiers::OCL_Weak && 15741 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 15742 return true; 15743 15744 return false; 15745 } 15746 15747 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 15748 QualType LHS, Expr *RHS) { 15749 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 15750 15751 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 15752 return false; 15753 15754 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 15755 return true; 15756 15757 return false; 15758 } 15759 15760 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 15761 Expr *LHS, Expr *RHS) { 15762 QualType LHSType; 15763 // PropertyRef on LHS type need be directly obtained from 15764 // its declaration as it has a PseudoType. 15765 ObjCPropertyRefExpr *PRE 15766 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 15767 if (PRE && !PRE->isImplicitProperty()) { 15768 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15769 if (PD) 15770 LHSType = PD->getType(); 15771 } 15772 15773 if (LHSType.isNull()) 15774 LHSType = LHS->getType(); 15775 15776 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 15777 15778 if (LT == Qualifiers::OCL_Weak) { 15779 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 15780 getCurFunction()->markSafeWeakUse(LHS); 15781 } 15782 15783 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 15784 return; 15785 15786 // FIXME. Check for other life times. 15787 if (LT != Qualifiers::OCL_None) 15788 return; 15789 15790 if (PRE) { 15791 if (PRE->isImplicitProperty()) 15792 return; 15793 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15794 if (!PD) 15795 return; 15796 15797 unsigned Attributes = PD->getPropertyAttributes(); 15798 if (Attributes & ObjCPropertyAttribute::kind_assign) { 15799 // when 'assign' attribute was not explicitly specified 15800 // by user, ignore it and rely on property type itself 15801 // for lifetime info. 15802 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 15803 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 15804 LHSType->isObjCRetainableType()) 15805 return; 15806 15807 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15808 if (cast->getCastKind() == CK_ARCConsumeObject) { 15809 Diag(Loc, diag::warn_arc_retained_property_assign) 15810 << RHS->getSourceRange(); 15811 return; 15812 } 15813 RHS = cast->getSubExpr(); 15814 } 15815 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 15816 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 15817 return; 15818 } 15819 } 15820 } 15821 15822 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 15823 15824 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 15825 SourceLocation StmtLoc, 15826 const NullStmt *Body) { 15827 // Do not warn if the body is a macro that expands to nothing, e.g: 15828 // 15829 // #define CALL(x) 15830 // if (condition) 15831 // CALL(0); 15832 if (Body->hasLeadingEmptyMacro()) 15833 return false; 15834 15835 // Get line numbers of statement and body. 
15836 bool StmtLineInvalid; 15837 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 15838 &StmtLineInvalid); 15839 if (StmtLineInvalid) 15840 return false; 15841 15842 bool BodyLineInvalid; 15843 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 15844 &BodyLineInvalid); 15845 if (BodyLineInvalid) 15846 return false; 15847 15848 // Warn if null statement and body are on the same line. 15849 if (StmtLine != BodyLine) 15850 return false; 15851 15852 return true; 15853 } 15854 15855 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 15856 const Stmt *Body, 15857 unsigned DiagID) { 15858 // Since this is a syntactic check, don't emit diagnostic for template 15859 // instantiations, this just adds noise. 15860 if (CurrentInstantiationScope) 15861 return; 15862 15863 // The body should be a null statement. 15864 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15865 if (!NBody) 15866 return; 15867 15868 // Do the usual checks. 15869 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15870 return; 15871 15872 Diag(NBody->getSemiLoc(), DiagID); 15873 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15874 } 15875 15876 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 15877 const Stmt *PossibleBody) { 15878 assert(!CurrentInstantiationScope); // Ensured by caller 15879 15880 SourceLocation StmtLoc; 15881 const Stmt *Body; 15882 unsigned DiagID; 15883 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 15884 StmtLoc = FS->getRParenLoc(); 15885 Body = FS->getBody(); 15886 DiagID = diag::warn_empty_for_body; 15887 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 15888 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 15889 Body = WS->getBody(); 15890 DiagID = diag::warn_empty_while_body; 15891 } else 15892 return; // Neither `for' nor `while'. 15893 15894 // The body should be a null statement. 15895 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15896 if (!NBody) 15897 return; 15898 15899 // Skip expensive checks if diagnostic is disabled. 15900 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 15901 return; 15902 15903 // Do the usual checks. 15904 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15905 return; 15906 15907 // `for(...);' and `while(...);' are popular idioms, so in order to keep 15908 // noise level low, emit diagnostics only if for/while is followed by a 15909 // CompoundStmt, e.g.: 15910 // for (int i = 0; i < n; i++); 15911 // { 15912 // a(i); 15913 // } 15914 // or if for/while is followed by a statement with more indentation 15915 // than for/while itself: 15916 // for (int i = 0; i < n; i++); 15917 // a(i); 15918 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 15919 if (!ProbableTypo) { 15920 bool BodyColInvalid; 15921 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 15922 PossibleBody->getBeginLoc(), &BodyColInvalid); 15923 if (BodyColInvalid) 15924 return; 15925 15926 bool StmtColInvalid; 15927 unsigned StmtCol = 15928 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 15929 if (StmtColInvalid) 15930 return; 15931 15932 if (BodyCol > StmtCol) 15933 ProbableTypo = true; 15934 } 15935 15936 if (ProbableTypo) { 15937 Diag(NBody->getSemiLoc(), DiagID); 15938 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15939 } 15940 } 15941 15942 //===--- CHECK: Warn on self move with std::move. -------------------------===// 15943 15944 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
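///
/// An illustrative sketch (not from the original tests):
///   std::vector<int> V = makeVector();
///   V = std::move(V);                  // warns under -Wself-move
///   this->Buf = std::move(this->Buf);  // matching member chains warn as well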
15945 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 15946 SourceLocation OpLoc) { 15947 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 15948 return; 15949 15950 if (inTemplateInstantiation()) 15951 return; 15952 15953 // Strip parens and casts away. 15954 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 15955 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 15956 15957 // Check for a call expression 15958 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 15959 if (!CE || CE->getNumArgs() != 1) 15960 return; 15961 15962 // Check for a call to std::move 15963 if (!CE->isCallToStdMove()) 15964 return; 15965 15966 // Get argument from std::move 15967 RHSExpr = CE->getArg(0); 15968 15969 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 15970 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 15971 15972 // Two DeclRefExpr's, check that the decls are the same. 15973 if (LHSDeclRef && RHSDeclRef) { 15974 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 15975 return; 15976 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 15977 RHSDeclRef->getDecl()->getCanonicalDecl()) 15978 return; 15979 15980 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15981 << LHSExpr->getSourceRange() 15982 << RHSExpr->getSourceRange(); 15983 return; 15984 } 15985 15986 // Member variables require a different approach to check for self moves. 15987 // MemberExpr's are the same if every nested MemberExpr refers to the same 15988 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 15989 // the base Expr's are CXXThisExpr's. 15990 const Expr *LHSBase = LHSExpr; 15991 const Expr *RHSBase = RHSExpr; 15992 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 15993 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 15994 if (!LHSME || !RHSME) 15995 return; 15996 15997 while (LHSME && RHSME) { 15998 if (LHSME->getMemberDecl()->getCanonicalDecl() != 15999 RHSME->getMemberDecl()->getCanonicalDecl()) 16000 return; 16001 16002 LHSBase = LHSME->getBase(); 16003 RHSBase = RHSME->getBase(); 16004 LHSME = dyn_cast<MemberExpr>(LHSBase); 16005 RHSME = dyn_cast<MemberExpr>(RHSBase); 16006 } 16007 16008 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 16009 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 16010 if (LHSDeclRef && RHSDeclRef) { 16011 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 16012 return; 16013 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 16014 RHSDeclRef->getDecl()->getCanonicalDecl()) 16015 return; 16016 16017 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16018 << LHSExpr->getSourceRange() 16019 << RHSExpr->getSourceRange(); 16020 return; 16021 } 16022 16023 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 16024 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 16025 << LHSExpr->getSourceRange() 16026 << RHSExpr->getSourceRange(); 16027 } 16028 16029 //===--- Layout compatibility ----------------------------------------------// 16030 16031 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 16032 16033 /// Check if two enumeration types are layout-compatible. 16034 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 16035 // C++11 [dcl.enum] p8: 16036 // Two enumeration types are layout-compatible if they have the same 16037 // underlying type. 16038 return ED1->isComplete() && ED2->isComplete() && 16039 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 16040 } 16041 16042 /// Check if two fields are layout-compatible. 
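///
/// For example (illustrative), 'int a : 3;' and 'int b : 3;' are
/// layout-compatible fields, while 'int a : 3;' and 'int b : 4;' are not,
/// because bit-fields must also agree in width.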
16043 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 16044 FieldDecl *Field2) { 16045 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 16046 return false; 16047 16048 if (Field1->isBitField() != Field2->isBitField()) 16049 return false; 16050 16051 if (Field1->isBitField()) { 16052 // Make sure that the bit-fields are the same length. 16053 unsigned Bits1 = Field1->getBitWidthValue(C); 16054 unsigned Bits2 = Field2->getBitWidthValue(C); 16055 16056 if (Bits1 != Bits2) 16057 return false; 16058 } 16059 16060 return true; 16061 } 16062 16063 /// Check if two standard-layout structs are layout-compatible. 16064 /// (C++11 [class.mem] p17) 16065 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 16066 RecordDecl *RD2) { 16067 // If both records are C++ classes, check that base classes match. 16068 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 16069 // If one of records is a CXXRecordDecl we are in C++ mode, 16070 // thus the other one is a CXXRecordDecl, too. 16071 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 16072 // Check number of base classes. 16073 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 16074 return false; 16075 16076 // Check the base classes. 16077 for (CXXRecordDecl::base_class_const_iterator 16078 Base1 = D1CXX->bases_begin(), 16079 BaseEnd1 = D1CXX->bases_end(), 16080 Base2 = D2CXX->bases_begin(); 16081 Base1 != BaseEnd1; 16082 ++Base1, ++Base2) { 16083 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 16084 return false; 16085 } 16086 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 16087 // If only RD2 is a C++ class, it should have zero base classes. 16088 if (D2CXX->getNumBases() > 0) 16089 return false; 16090 } 16091 16092 // Check the fields. 16093 RecordDecl::field_iterator Field2 = RD2->field_begin(), 16094 Field2End = RD2->field_end(), 16095 Field1 = RD1->field_begin(), 16096 Field1End = RD1->field_end(); 16097 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 16098 if (!isLayoutCompatible(C, *Field1, *Field2)) 16099 return false; 16100 } 16101 if (Field1 != Field1End || Field2 != Field2End) 16102 return false; 16103 16104 return true; 16105 } 16106 16107 /// Check if two standard-layout unions are layout-compatible. 16108 /// (C++11 [class.mem] p18) 16109 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 16110 RecordDecl *RD2) { 16111 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 16112 for (auto *Field2 : RD2->fields()) 16113 UnmatchedFields.insert(Field2); 16114 16115 for (auto *Field1 : RD1->fields()) { 16116 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 16117 I = UnmatchedFields.begin(), 16118 E = UnmatchedFields.end(); 16119 16120 for ( ; I != E; ++I) { 16121 if (isLayoutCompatible(C, Field1, *I)) { 16122 bool Result = UnmatchedFields.erase(*I); 16123 (void) Result; 16124 assert(Result); 16125 break; 16126 } 16127 } 16128 if (I == E) 16129 return false; 16130 } 16131 16132 return UnmatchedFields.empty(); 16133 } 16134 16135 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 16136 RecordDecl *RD2) { 16137 if (RD1->isUnion() != RD2->isUnion()) 16138 return false; 16139 16140 if (RD1->isUnion()) 16141 return isLayoutCompatibleUnion(C, RD1, RD2); 16142 else 16143 return isLayoutCompatibleStruct(C, RD1, RD2); 16144 } 16145 16146 /// Check if two types are layout-compatible in C++11 sense. 
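///
/// For example (an illustrative sketch):
///   enum E1 : int {};              // layout-compatible with E2 below
///   enum E2 : int {};
///   struct A { int x; char c; };
///   struct B { int y; char d; };   // layout-compatible with A
///   struct C { char c; int x; };   // not layout-compatible with A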
16147 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16148 if (T1.isNull() || T2.isNull()) 16149 return false; 16150 16151 // C++11 [basic.types] p11: 16152 // If two types T1 and T2 are the same type, then T1 and T2 are 16153 // layout-compatible types. 16154 if (C.hasSameType(T1, T2)) 16155 return true; 16156 16157 T1 = T1.getCanonicalType().getUnqualifiedType(); 16158 T2 = T2.getCanonicalType().getUnqualifiedType(); 16159 16160 const Type::TypeClass TC1 = T1->getTypeClass(); 16161 const Type::TypeClass TC2 = T2->getTypeClass(); 16162 16163 if (TC1 != TC2) 16164 return false; 16165 16166 if (TC1 == Type::Enum) { 16167 return isLayoutCompatible(C, 16168 cast<EnumType>(T1)->getDecl(), 16169 cast<EnumType>(T2)->getDecl()); 16170 } else if (TC1 == Type::Record) { 16171 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16172 return false; 16173 16174 return isLayoutCompatible(C, 16175 cast<RecordType>(T1)->getDecl(), 16176 cast<RecordType>(T2)->getDecl()); 16177 } 16178 16179 return false; 16180 } 16181 16182 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16183 16184 /// Given a type tag expression find the type tag itself. 16185 /// 16186 /// \param TypeExpr Type tag expression, as it appears in user's code. 16187 /// 16188 /// \param VD Declaration of an identifier that appears in a type tag. 16189 /// 16190 /// \param MagicValue Type tag magic value. 16191 /// 16192 /// \param isConstantEvaluated whether the evalaution should be performed in 16193 16194 /// constant context. 16195 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16196 const ValueDecl **VD, uint64_t *MagicValue, 16197 bool isConstantEvaluated) { 16198 while(true) { 16199 if (!TypeExpr) 16200 return false; 16201 16202 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16203 16204 switch (TypeExpr->getStmtClass()) { 16205 case Stmt::UnaryOperatorClass: { 16206 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16207 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16208 TypeExpr = UO->getSubExpr(); 16209 continue; 16210 } 16211 return false; 16212 } 16213 16214 case Stmt::DeclRefExprClass: { 16215 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16216 *VD = DRE->getDecl(); 16217 return true; 16218 } 16219 16220 case Stmt::IntegerLiteralClass: { 16221 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16222 llvm::APInt MagicValueAPInt = IL->getValue(); 16223 if (MagicValueAPInt.getActiveBits() <= 64) { 16224 *MagicValue = MagicValueAPInt.getZExtValue(); 16225 return true; 16226 } else 16227 return false; 16228 } 16229 16230 case Stmt::BinaryConditionalOperatorClass: 16231 case Stmt::ConditionalOperatorClass: { 16232 const AbstractConditionalOperator *ACO = 16233 cast<AbstractConditionalOperator>(TypeExpr); 16234 bool Result; 16235 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16236 isConstantEvaluated)) { 16237 if (Result) 16238 TypeExpr = ACO->getTrueExpr(); 16239 else 16240 TypeExpr = ACO->getFalseExpr(); 16241 continue; 16242 } 16243 return false; 16244 } 16245 16246 case Stmt::BinaryOperatorClass: { 16247 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16248 if (BO->getOpcode() == BO_Comma) { 16249 TypeExpr = BO->getRHS(); 16250 continue; 16251 } 16252 return false; 16253 } 16254 16255 default: 16256 return false; 16257 } 16258 } 16259 } 16260 16261 /// Retrieve the C type corresponding to type tag TypeExpr. 
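///
/// Type tags are typically declared and used roughly as follows (an
/// illustrative sketch loosely following the Clang attribute documentation):
///   typedef int MPI_Datatype;
///   extern struct mpi_datatype mpi_datatype_int
///       __attribute__((type_tag_for_datatype(mpi, int)));
///   #define MPI_INT ((MPI_Datatype)&mpi_datatype_int)
///   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
///       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
/// Here the third argument of MPI_Send is the type tag expression that this
/// function inspects.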
16262 /// 16263 /// \param TypeExpr Expression that specifies a type tag. 16264 /// 16265 /// \param MagicValues Registered magic values. 16266 /// 16267 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 16268 /// kind. 16269 /// 16270 /// \param TypeInfo Information about the corresponding C type. 16271 /// 16272 /// \param isConstantEvaluated whether the evalaution should be performed in 16273 /// constant context. 16274 /// 16275 /// \returns true if the corresponding C type was found. 16276 static bool GetMatchingCType( 16277 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 16278 const ASTContext &Ctx, 16279 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 16280 *MagicValues, 16281 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 16282 bool isConstantEvaluated) { 16283 FoundWrongKind = false; 16284 16285 // Variable declaration that has type_tag_for_datatype attribute. 16286 const ValueDecl *VD = nullptr; 16287 16288 uint64_t MagicValue; 16289 16290 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 16291 return false; 16292 16293 if (VD) { 16294 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 16295 if (I->getArgumentKind() != ArgumentKind) { 16296 FoundWrongKind = true; 16297 return false; 16298 } 16299 TypeInfo.Type = I->getMatchingCType(); 16300 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 16301 TypeInfo.MustBeNull = I->getMustBeNull(); 16302 return true; 16303 } 16304 return false; 16305 } 16306 16307 if (!MagicValues) 16308 return false; 16309 16310 llvm::DenseMap<Sema::TypeTagMagicValue, 16311 Sema::TypeTagData>::const_iterator I = 16312 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 16313 if (I == MagicValues->end()) 16314 return false; 16315 16316 TypeInfo = I->second; 16317 return true; 16318 } 16319 16320 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 16321 uint64_t MagicValue, QualType Type, 16322 bool LayoutCompatible, 16323 bool MustBeNull) { 16324 if (!TypeTagForDatatypeMagicValues) 16325 TypeTagForDatatypeMagicValues.reset( 16326 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 16327 16328 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 16329 (*TypeTagForDatatypeMagicValues)[Magic] = 16330 TypeTagData(Type, LayoutCompatible, MustBeNull); 16331 } 16332 16333 static bool IsSameCharType(QualType T1, QualType T2) { 16334 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 16335 if (!BT1) 16336 return false; 16337 16338 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 16339 if (!BT2) 16340 return false; 16341 16342 BuiltinType::Kind T1Kind = BT1->getKind(); 16343 BuiltinType::Kind T2Kind = BT2->getKind(); 16344 16345 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 16346 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 16347 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 16348 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 16349 } 16350 16351 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 16352 const ArrayRef<const Expr *> ExprArgs, 16353 SourceLocation CallSiteLoc) { 16354 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 16355 bool IsPointerAttr = Attr->getIsPointer(); 16356 16357 // Retrieve the argument representing the 'type_tag'. 
16358 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 16359 if (TypeTagIdxAST >= ExprArgs.size()) { 16360 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16361 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 16362 return; 16363 } 16364 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 16365 bool FoundWrongKind; 16366 TypeTagData TypeInfo; 16367 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 16368 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 16369 TypeInfo, isConstantEvaluated())) { 16370 if (FoundWrongKind) 16371 Diag(TypeTagExpr->getExprLoc(), 16372 diag::warn_type_tag_for_datatype_wrong_kind) 16373 << TypeTagExpr->getSourceRange(); 16374 return; 16375 } 16376 16377 // Retrieve the argument representing the 'arg_idx'. 16378 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 16379 if (ArgumentIdxAST >= ExprArgs.size()) { 16380 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16381 << 1 << Attr->getArgumentIdx().getSourceIndex(); 16382 return; 16383 } 16384 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 16385 if (IsPointerAttr) { 16386 // Skip implicit cast of pointer to `void *' (as a function argument). 16387 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 16388 if (ICE->getType()->isVoidPointerType() && 16389 ICE->getCastKind() == CK_BitCast) 16390 ArgumentExpr = ICE->getSubExpr(); 16391 } 16392 QualType ArgumentType = ArgumentExpr->getType(); 16393 16394 // Passing a `void*' pointer shouldn't trigger a warning. 16395 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 16396 return; 16397 16398 if (TypeInfo.MustBeNull) { 16399 // Type tag with matching void type requires a null pointer. 16400 if (!ArgumentExpr->isNullPointerConstant(Context, 16401 Expr::NPC_ValueDependentIsNotNull)) { 16402 Diag(ArgumentExpr->getExprLoc(), 16403 diag::warn_type_safety_null_pointer_required) 16404 << ArgumentKind->getName() 16405 << ArgumentExpr->getSourceRange() 16406 << TypeTagExpr->getSourceRange(); 16407 } 16408 return; 16409 } 16410 16411 QualType RequiredType = TypeInfo.Type; 16412 if (IsPointerAttr) 16413 RequiredType = Context.getPointerType(RequiredType); 16414 16415 bool mismatch = false; 16416 if (!TypeInfo.LayoutCompatible) { 16417 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 16418 16419 // C++11 [basic.fundamental] p1: 16420 // Plain char, signed char, and unsigned char are three distinct types. 16421 // 16422 // But we treat plain `char' as equivalent to `signed char' or `unsigned 16423 // char' depending on the current char signedness mode. 
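    // For example (illustrative), when 'char' is signed, an argument of type
    // 'char *' is accepted where the registered pointee type is 'signed char',
    // and vice versa.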
16424 if (mismatch) 16425 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 16426 RequiredType->getPointeeType())) || 16427 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 16428 mismatch = false; 16429 } else 16430 if (IsPointerAttr) 16431 mismatch = !isLayoutCompatible(Context, 16432 ArgumentType->getPointeeType(), 16433 RequiredType->getPointeeType()); 16434 else 16435 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 16436 16437 if (mismatch) 16438 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 16439 << ArgumentType << ArgumentKind 16440 << TypeInfo.LayoutCompatible << RequiredType 16441 << ArgumentExpr->getSourceRange() 16442 << TypeTagExpr->getSourceRange(); 16443 } 16444 16445 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 16446 CharUnits Alignment) { 16447 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 16448 } 16449 16450 void Sema::DiagnoseMisalignedMembers() { 16451 for (MisalignedMember &m : MisalignedMembers) { 16452 const NamedDecl *ND = m.RD; 16453 if (ND->getName().empty()) { 16454 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 16455 ND = TD; 16456 } 16457 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 16458 << m.MD << ND << m.E->getSourceRange(); 16459 } 16460 MisalignedMembers.clear(); 16461 } 16462 16463 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 16464 E = E->IgnoreParens(); 16465 if (!T->isPointerType() && !T->isIntegerType()) 16466 return; 16467 if (isa<UnaryOperator>(E) && 16468 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 16469 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 16470 if (isa<MemberExpr>(Op)) { 16471 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 16472 if (MA != MisalignedMembers.end() && 16473 (T->isIntegerType() || 16474 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 16475 Context.getTypeAlignInChars( 16476 T->getPointeeType()) <= MA->Alignment)))) 16477 MisalignedMembers.erase(MA); 16478 } 16479 } 16480 } 16481 16482 void Sema::RefersToMemberWithReducedAlignment( 16483 Expr *E, 16484 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 16485 Action) { 16486 const auto *ME = dyn_cast<MemberExpr>(E); 16487 if (!ME) 16488 return; 16489 16490 // No need to check expressions with an __unaligned-qualified type. 16491 if (E->getType().getQualifiers().hasUnaligned()) 16492 return; 16493 16494 // For a chain of MemberExpr like "a.b.c.d" this list 16495 // will keep FieldDecl's like [d, c, b]. 16496 SmallVector<FieldDecl *, 4> ReverseMemberChain; 16497 const MemberExpr *TopME = nullptr; 16498 bool AnyIsPacked = false; 16499 do { 16500 QualType BaseType = ME->getBase()->getType(); 16501 if (BaseType->isDependentType()) 16502 return; 16503 if (ME->isArrow()) 16504 BaseType = BaseType->getPointeeType(); 16505 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 16506 if (RD->isInvalidDecl()) 16507 return; 16508 16509 ValueDecl *MD = ME->getMemberDecl(); 16510 auto *FD = dyn_cast<FieldDecl>(MD); 16511 // We do not care about non-data members. 
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
       I++) {
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
  }

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // It may fulfill the offset, but the effective alignment may still be
      // lower than the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit. Intuitively,
    // walking the chain of member expressions from right to left, we start
    // with the alignment required by the field type, but some packed
    // attribute in that chain has reduced it. Another packed structure may
    // increase it again; if we got here, that increase was not enough. So
    // pointing at the first FieldDecl that either is packed itself or whose
    // RecordDecl is packed seems reasonable.
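    // For example (an illustrative sketch):
    //   struct __attribute__((packed)) Inner { char c; int i; };
    //   struct Outer { char pad; struct Inner in; } O;
    //   int *p = &O.in.i;  // -Waddress-of-packed-member points at 'i'.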
16576 FieldDecl *FD = nullptr; 16577 CharUnits Alignment; 16578 for (FieldDecl *FDI : ReverseMemberChain) { 16579 if (FDI->hasAttr<PackedAttr>() || 16580 FDI->getParent()->hasAttr<PackedAttr>()) { 16581 FD = FDI; 16582 Alignment = std::min( 16583 Context.getTypeAlignInChars(FD->getType()), 16584 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 16585 break; 16586 } 16587 } 16588 assert(FD && "We did not find a packed FieldDecl!"); 16589 Action(E, FD->getParent(), FD, Alignment); 16590 } 16591 } 16592 16593 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 16594 using namespace std::placeholders; 16595 16596 RefersToMemberWithReducedAlignment( 16597 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 16598 _2, _3, _4)); 16599 } 16600 16601 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 16602 ExprResult CallResult) { 16603 if (checkArgCount(*this, TheCall, 1)) 16604 return ExprError(); 16605 16606 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 16607 if (MatrixArg.isInvalid()) 16608 return MatrixArg; 16609 Expr *Matrix = MatrixArg.get(); 16610 16611 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 16612 if (!MType) { 16613 Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg); 16614 return ExprError(); 16615 } 16616 16617 // Create returned matrix type by swapping rows and columns of the argument 16618 // matrix type. 16619 QualType ResultType = Context.getConstantMatrixType( 16620 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 16621 16622 // Change the return type to the type of the returned matrix. 16623 TheCall->setType(ResultType); 16624 16625 // Update call argument to use the possibly converted matrix argument. 16626 TheCall->setArg(0, Matrix); 16627 return CallResult; 16628 } 16629 16630 // Get and verify the matrix dimensions. 16631 static llvm::Optional<unsigned> 16632 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 16633 SourceLocation ErrorPos; 16634 Optional<llvm::APSInt> Value = 16635 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 16636 if (!Value) { 16637 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 16638 << Name; 16639 return {}; 16640 } 16641 uint64_t Dim = Value->getZExtValue(); 16642 if (!ConstantMatrixType::isDimensionValid(Dim)) { 16643 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 16644 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 16645 return {}; 16646 } 16647 return Dim; 16648 } 16649 16650 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 16651 ExprResult CallResult) { 16652 if (!getLangOpts().MatrixTypes) { 16653 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 16654 return ExprError(); 16655 } 16656 16657 if (checkArgCount(*this, TheCall, 4)) 16658 return ExprError(); 16659 16660 unsigned PtrArgIdx = 0; 16661 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16662 Expr *RowsExpr = TheCall->getArg(1); 16663 Expr *ColumnsExpr = TheCall->getArg(2); 16664 Expr *StrideExpr = TheCall->getArg(3); 16665 16666 bool ArgError = false; 16667 16668 // Check pointer argument. 
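// Illustrative call shape (assumes -fenable-matrix; declarations are not part
// of this file):
//   double *Ptr;
//   auto M = __builtin_matrix_column_major_load(Ptr, 3, 2, /*Stride=*/3);
// The pointee type of the pointer argument supplies the element type of the
// resulting 3x2 matrix; that pointer is validated below.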
16669 {
16670 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
16671 if (PtrConv.isInvalid())
16672 return PtrConv;
16673 PtrExpr = PtrConv.get();
16674 TheCall->setArg(0, PtrExpr);
16675 if (PtrExpr->isTypeDependent()) {
16676 TheCall->setType(Context.DependentTy);
16677 return TheCall;
16678 }
16679 }
16680
16681 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
16682 QualType ElementTy;
16683 if (!PtrTy) {
16684 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
16685 << PtrArgIdx + 1;
16686 ArgError = true;
16687 } else {
16688 ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
16689
16690 if (!ConstantMatrixType::isValidElementType(ElementTy)) {
16691 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
16692 << PtrArgIdx + 1;
16693 ArgError = true;
16694 }
16695 }
16696
16697 // Apply default Lvalue conversions and convert the expression to size_t.
16698 auto ApplyArgumentConversions = [this](Expr *E) {
16699 ExprResult Conv = DefaultLvalueConversion(E);
16700 if (Conv.isInvalid())
16701 return Conv;
16702
16703 return tryConvertExprToType(Conv.get(), Context.getSizeType());
16704 };
16705
16706 // Apply conversion to row and column expressions.
16707 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
16708 if (!RowsConv.isInvalid()) {
16709 RowsExpr = RowsConv.get();
16710 TheCall->setArg(1, RowsExpr);
16711 } else
16712 RowsExpr = nullptr;
16713
16714 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
16715 if (!ColumnsConv.isInvalid()) {
16716 ColumnsExpr = ColumnsConv.get();
16717 TheCall->setArg(2, ColumnsExpr);
16718 } else
16719 ColumnsExpr = nullptr;
16720
16721 // If any part of the result matrix type is still pending, just use
16722 // Context.DependentTy, until all parts are resolved.
16723 if ((RowsExpr && RowsExpr->isTypeDependent()) ||
16724 (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
16725 TheCall->setType(Context.DependentTy);
16726 return CallResult;
16727 }
16728
16729 // Check row and column dimensions.
16730 llvm::Optional<unsigned> MaybeRows;
16731 if (RowsExpr)
16732 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
16733
16734 llvm::Optional<unsigned> MaybeColumns;
16735 if (ColumnsExpr)
16736 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
16737
16738 // Check stride argument.
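// Illustrative example (assumed constants, not from this file): with a row
// count of 3, a constant stride smaller than 3 is rejected below, e.g.
//   __builtin_matrix_column_major_load(Ptr, 3, 2, 2); // stride < rows
// Non-constant strides are not checked here.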
16739 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 16740 if (StrideConv.isInvalid()) 16741 return ExprError(); 16742 StrideExpr = StrideConv.get(); 16743 TheCall->setArg(3, StrideExpr); 16744 16745 if (MaybeRows) { 16746 if (Optional<llvm::APSInt> Value = 16747 StrideExpr->getIntegerConstantExpr(Context)) { 16748 uint64_t Stride = Value->getZExtValue(); 16749 if (Stride < *MaybeRows) { 16750 Diag(StrideExpr->getBeginLoc(), 16751 diag::err_builtin_matrix_stride_too_small); 16752 ArgError = true; 16753 } 16754 } 16755 } 16756 16757 if (ArgError || !MaybeRows || !MaybeColumns) 16758 return ExprError(); 16759 16760 TheCall->setType( 16761 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 16762 return CallResult; 16763 } 16764 16765 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 16766 ExprResult CallResult) { 16767 if (checkArgCount(*this, TheCall, 3)) 16768 return ExprError(); 16769 16770 unsigned PtrArgIdx = 1; 16771 Expr *MatrixExpr = TheCall->getArg(0); 16772 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16773 Expr *StrideExpr = TheCall->getArg(2); 16774 16775 bool ArgError = false; 16776 16777 { 16778 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 16779 if (MatrixConv.isInvalid()) 16780 return MatrixConv; 16781 MatrixExpr = MatrixConv.get(); 16782 TheCall->setArg(0, MatrixExpr); 16783 } 16784 if (MatrixExpr->isTypeDependent()) { 16785 TheCall->setType(Context.DependentTy); 16786 return TheCall; 16787 } 16788 16789 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 16790 if (!MatrixTy) { 16791 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0; 16792 ArgError = true; 16793 } 16794 16795 { 16796 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 16797 if (PtrConv.isInvalid()) 16798 return PtrConv; 16799 PtrExpr = PtrConv.get(); 16800 TheCall->setArg(1, PtrExpr); 16801 if (PtrExpr->isTypeDependent()) { 16802 TheCall->setType(Context.DependentTy); 16803 return TheCall; 16804 } 16805 } 16806 16807 // Check pointer argument. 16808 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 16809 if (!PtrTy) { 16810 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16811 << PtrArgIdx + 1; 16812 ArgError = true; 16813 } else { 16814 QualType ElementTy = PtrTy->getPointeeType(); 16815 if (ElementTy.isConstQualified()) { 16816 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 16817 ArgError = true; 16818 } 16819 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 16820 if (MatrixTy && 16821 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 16822 Diag(PtrExpr->getBeginLoc(), 16823 diag::err_builtin_matrix_pointer_arg_mismatch) 16824 << ElementTy << MatrixTy->getElementType(); 16825 ArgError = true; 16826 } 16827 } 16828 16829 // Apply default Lvalue conversions and convert the stride expression to 16830 // size_t. 16831 { 16832 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 16833 if (StrideConv.isInvalid()) 16834 return StrideConv; 16835 16836 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 16837 if (StrideConv.isInvalid()) 16838 return StrideConv; 16839 StrideExpr = StrideConv.get(); 16840 TheCall->setArg(2, StrideExpr); 16841 } 16842 16843 // Check stride argument. 
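// Illustrative example (assumed types, not from this file):
//   using m4x4_t = double __attribute__((matrix_type(4, 4)));
//   m4x4_t M; double *Ptr;
//   __builtin_matrix_column_major_store(M, Ptr, /*Stride=*/4);
// A constant stride smaller than 4 (the number of rows of 'M') is diagnosed
// below.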
16844 if (MatrixTy) {
16845 if (Optional<llvm::APSInt> Value =
16846 StrideExpr->getIntegerConstantExpr(Context)) {
16847 uint64_t Stride = Value->getZExtValue();
16848 if (Stride < MatrixTy->getNumRows()) {
16849 Diag(StrideExpr->getBeginLoc(),
16850 diag::err_builtin_matrix_stride_too_small);
16851 ArgError = true;
16852 }
16853 }
16854 }
16855
16856 if (ArgError)
16857 return ExprError();
16858
16859 return CallResult;
16860 }
16861
16862 /// Enforce the bounds of a TCB.
16863 /// Checks that every function in a named TCB only directly calls other
16864 /// functions in the same TCB, as marked by the enforce_tcb and
16865 /// enforce_tcb_leaf attributes.
16866 void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
16867 const FunctionDecl *Callee) {
16868 const FunctionDecl *Caller = getCurFunctionDecl();
16869
16870 // Calls to builtins are not enforced.
16871 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
16872 Callee->getBuiltinID() != 0)
16873 return;
16874
16875 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
16876 // all TCBs the callee is a part of.
16877 llvm::StringSet<> CalleeTCBs;
16878 for_each(Callee->specific_attrs<EnforceTCBAttr>(),
16879 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
16880 for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
16881 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
16882
16883 // Go through the TCBs the caller is a part of and emit warnings if Caller
16884 // is in a TCB that the Callee is not a part of.
16885 for_each(
16886 Caller->specific_attrs<EnforceTCBAttr>(),
16887 [&](const auto *A) {
16888 StringRef CallerTCB = A->getTCBName();
16889 if (CalleeTCBs.count(CallerTCB) == 0) {
16890 this->Diag(TheCall->getExprLoc(),
16891 diag::warn_tcb_enforcement_violation) << Callee
16892 << CallerTCB;
16893 }
16894 });
16895 }
16896
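// Illustrative example of the enforcement above (assumed declarations, not
// part of this file):
//   void PlainHelper(void);
//   __attribute__((enforce_tcb("net"))) void Handler(void) {
//     PlainHelper(); // diagnosed: the call leaves the "net" TCB
//   }
// Annotating PlainHelper with enforce_tcb("net") or enforce_tcb_leaf("net")
// keeps the call within the TCB and silences the warning.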