//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /*function call*/ << desiredArgCount << argCount
           << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
                    call->getArg(argCount - 1)->getEndLoc());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
         << 0 /*function call*/ << desiredArgCount << argCount
         << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
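  // For example (illustrative), a well-formed use is:
  //   int y = __builtin_annotation(x, "my annotation");
  // where 'x' is an integer and the second argument is a narrow string literal.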
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
  // We need at least one argument.
  if (TheCall->getNumArgs() < 1) {
    S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 1 << TheCall->getNumArgs()
        << TheCall->getCallee()->getSourceRange();
    return true;
  }

  // All arguments should be wide string literals.
  for (Expr *Arg : TheCall->arguments()) {
    auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
    if (!Literal || !Literal->isWide()) {
      S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
          << Arg->getSourceRange();
      return true;
    }
  }

  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
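  // For example (illustrative), '__builtin_align_up(p, 32)' passes the checks
  // below, while an alignment of 0 or 3, or one larger than
  // 1 << (bit width of the source type - 1), is rejected.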
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed ExtIntType args larger than 128 bits to mul function until
  // we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_ext_int_max_size)
               << 128;
    }
  }

  return false;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
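    // (Illustrative) for "%5d" this adds max(field width 5, default
    // precision 1) = 5 bytes.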
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
  if (!BuiltinID)
    return;

  const TargetInfo &TI = getASTContext().getTargetInfo();
  unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());

  auto ComputeExplicitObjectSizeArgument =
      [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr::EvalResult Result;
    Expr *SizeArg = TheCall->getArg(Index);
    if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
      return llvm::None;
    return Result.Val.getInt();
  };

  auto ComputeSizeArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    // If the parameter has a pass_object_size attribute, then we should use its
    // (potentially) more strict checking mode. Otherwise, conservatively assume
    // type 0.
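    // (Illustrative) a parameter declared as
    //   void *buf __attribute__((pass_object_size(1)))
    // would make us query the object size with type 1 instead.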
    int BOSType = 0;
    if (const auto *POS =
            FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
      BOSType = POS->getType();

    const Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
      return llvm::None;

    // Get the object size in the target's size_t width.
    return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
  };

  auto ComputeStrLenArgument = [&](unsigned Index) -> Optional<llvm::APSInt> {
    Expr *ObjArg = TheCall->getArg(Index);
    uint64_t Result;
    if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
      return llvm::None;
    // Add 1 for null byte.
    return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
  };

  Optional<llvm::APSInt> SourceSize;
  Optional<llvm::APSInt> DestinationSize;
  unsigned DiagID = 0;
  bool IsChkVariant = false;

  switch (BuiltinID) {
  default:
    return;
  case Builtin::BI__builtin_strcpy:
  case Builtin::BIstrcpy: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BI__builtin___strcpy_chk: {
    DiagID = diag::warn_fortify_strlen_overflow;
    SourceSize = ComputeStrLenArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(2);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIsprintf:
  case Builtin::BI__builtin___sprintf_chk: {
    size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
    auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();

    if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {

      if (!Format->isAscii() && !Format->isUTF8())
        return;

      StringRef FormatStrRef = Format->getString();
      EstimateSizeFormatHandler H(FormatStrRef);
      const char *FormatBytes = FormatStrRef.data();
      const ConstantArrayType *T =
          Context.getAsConstantArrayType(Format->getType());
      assert(T && "String literal not of constant array type!");
      size_t TypeSize = T->getSize().getZExtValue();

      // In case there's a null byte somewhere.
      size_t StrLen =
          std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
      if (!analyze_format_string::ParsePrintfString(
              H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
              Context.getTargetInfo(), false)) {
        DiagID = diag::warn_fortify_source_format_overflow;
        SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
                         .extOrTrunc(SizeTypeWidth);
        if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
          DestinationSize = ComputeExplicitObjectSizeArgument(2);
          IsChkVariant = true;
        } else {
          DestinationSize = ComputeSizeArgument(0);
        }
        break;
      }
    }
    return;
  }
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
  case Builtin::BI__builtin___memccpy_chk:
  case Builtin::BI__builtin___mempcpy_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
    DestinationSize =
        ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    IsChkVariant = true;
    break;
  }

  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk: {
    DiagID = diag::warn_builtin_chk_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeExplicitObjectSizeArgument(3);
    IsChkVariant = true;
    break;
  }

  case Builtin::BIstrncat:
  case Builtin::BI__builtin_strncat:
  case Builtin::BIstrncpy:
  case Builtin::BI__builtin_strncpy:
  case Builtin::BIstpncpy:
  case Builtin::BI__builtin_stpncpy: {
    // Whether these functions overflow depends on the runtime strlen of the
    // string, not just the buffer size, so emitting the "always overflow"
    // diagnostic isn't quite right. We should still diagnose passing a buffer
    // size larger than the destination buffer though; this is a runtime abort
    // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
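    // (Illustrative) e.g.:
    //   char buf[4];
    //   strncpy(buf, src, 10); // size argument exceeds the destination size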
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove:
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    DiagID = diag::warn_fortify_source_overflow;
    SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  case Builtin::BIsnprintf:
  case Builtin::BI__builtin_snprintf:
  case Builtin::BIvsnprintf:
  case Builtin::BI__builtin_vsnprintf: {
    DiagID = diag::warn_fortify_source_size_mismatch;
    SourceSize = ComputeExplicitObjectSizeArgument(1);
    DestinationSize = ComputeSizeArgument(0);
    break;
  }
  }

  if (!SourceSize || !DestinationSize ||
      SourceSize.getValue().ule(DestinationSize.getValue()))
    return;

  StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
  // Skim off the details of whichever builtin was called to produce a better
  // diagnostic, as it's unlikely that the user wrote the __builtin explicitly.
  if (IsChkVariant) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
    FunctionName = FunctionName.drop_back(std::strlen("_chk"));
  } else if (FunctionName.startswith("__builtin_")) {
    FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
  }

  SmallString<16> DestinationStr;
  SmallString<16> SourceStr;
  DestinationSize->toString(DestinationStr, /*Radix=*/10);
  SourceSize->toString(SourceStr, /*Radix=*/10);
  DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                      PDiag(DiagID)
                          << FunctionName << DestinationStr << SourceStr);
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through template
  // instantiation. Therefore checking once during the parse is sufficient.
  if (SemaRef.inTemplateInstantiation())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

static inline bool isBlockPointer(Expr *Arg) {
  return Arg->getType()->isBlockPointerType();
}

/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
/// void*, which is a requirement of device side enqueue.
static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  ArrayRef<QualType> Params =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
  unsigned ArgCounter = 0;
  bool IllegalParams = false;
  // Iterate through the block parameters until either one is found that is not
  // a local void*, or the block is valid.
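  // (Illustrative OpenCL C) a conforming block looks like:
  //   ^(local void *p, local void *q) { ... }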
  for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
       I != E; ++I, ++ArgCounter) {
    if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
        (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
            LangAS::opencl_local) {
      // Get the location of the error. If a block literal has been passed
      // (BlockExpr) then we can point straight to the offending argument,
      // else we just point to the variable reference.
      SourceLocation ErrorLoc;
      if (isa<BlockExpr>(BlockArg)) {
        BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl();
        ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc();
      } else if (isa<DeclRefExpr>(BlockArg)) {
        ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc();
      }
      S.Diag(ErrorLoc,
             diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
      IllegalParams = true;
    }
  }

  return IllegalParams;
}

static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
        << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
    return true;
  }
  return false;
}

static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  if (checkOpenCLSubgroupExt(S, TheCall))
    return true;

  // First argument is an ndrange_t type.
  Expr *NDRangeArg = TheCall->getArg(0);
  if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  Expr *BlockArg = TheCall->getArg(1);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
/// get_kernel_work_group_size
/// and get_kernel_preferred_work_group_size_multiple builtin functions.
static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  Expr *BlockArg = TheCall->getArg(0);
  if (!isBlockPointer(BlockArg)) {
    S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "block";
    return true;
  }
  return checkOpenCLBlockArgs(S, BlockArg);
}

/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
                                      const QualType &IntType);

static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
                                            unsigned Start, unsigned End) {
  bool IllegalParams = false;
  for (unsigned I = Start; I <= End; ++I)
    IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
                                               S.Context.getSizeType());
  return IllegalParams;
}

/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameters of the passed block.
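/// For example (illustrative), a block taking two 'local void*' parameters
/// must be enqueued with two trailing size arguments:
///   enqueue_kernel(q, flags, nd, ^(local void *a, local void *b){ ... },
///                  (uint)size_a, (uint)size_b);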
static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
                                           Expr *BlockArg,
                                           unsigned NumNonVarArgs) {
  const BlockPointerType *BPT =
      cast<BlockPointerType>(BlockArg->getType().getCanonicalType());
  unsigned NumBlockParams =
      BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
  unsigned TotalNumArgs = TheCall->getNumArgs();

  // For each argument passed to the block, a corresponding uint needs to
  // be passed to describe the size of the local memory.
  if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_local_size_args);
    return true;
  }

  // Check that the sizes of the local memory are specified by integers.
  return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs,
                                         TotalNumArgs - 1);
}

/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
/// overload formats specified in Table 6.13.17.1.
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(void))
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
/// int enqueue_kernel(queue_t queue,
///                    kernel_enqueue_flags_t flags,
///                    const ndrange_t ndrange,
///                    uint num_events_in_wait_list,
///                    clk_event_t *event_wait_list,
///                    clk_event_t *event_ret,
///                    void (^block)(local void*, ...),
///                    uint size0, ...)
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific cases has been detected, give a generic error.
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since the return type of the reserve_read/write_pipe built-in functions is
  // reserve_id_t, which is not defined in the builtin def file, we use int as
  // the return type here and need to override the return type of these
  // functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  // * The type of the argument if it's not an array or function type,
  //   Otherwise,
  // * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return llvm::Optional<unsigned>{};
  }();
  if (DiagSelect.hasValue()) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << DiagSelect.getValue() << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
                          ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
  llvm::Triple::ArchType CurArch =
      S.getASTContext().getTargetInfo().getTriple().getArch();
  if (llvm::is_contained(SupportedArchs, CurArch))
    return false;
  S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
      << TheCall->getSourceRange();
  return true;
}

static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
                                 SourceLocation CallSiteLoc);

bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                      CallExpr *TheCall) {
  switch (TI.getTriple().getArch()) {
  default:
    // Some builtins don't require additional checking, so just consider these
    // acceptable.
    return false;
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::hexagon:
    return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::systemz:
    return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
  case llvm::Triple::amdgcn:
    return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
  }
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
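  // (Illustrative) an argument is flagged in ICEArguments when its type letter
  // carries the 'I' modifier in Builtins.def, i.e. it must fold to an integer
  // constant expression; such arguments are evaluated and checked just below.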
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetSupport(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
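  // (Illustrative MSVC-style use) e.g.:
  //   unsigned char OldBit = _bittest64(&Flags, 42);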
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
                                  {llvm::Triple::x86_64, llvm::Triple::arm,
                                   llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    LLVM_FALLTHROUGH;
  case Builtin::BI__builtin_alloca:
    Diag(TheCall->getBeginLoc(), diag::warn_alloca)
        << TheCall->getDirectCallee();
    break;
  case Builtin::BI__arithmetic_fence:
    if (SemaBuiltinArithmeticFence(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_complex:
    if (SemaBuiltinComplex(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  case Builtin::BI__sync_fetch_and_add:
Builtin::BI__sync_fetch_and_add_1: 1617 case Builtin::BI__sync_fetch_and_add_2: 1618 case Builtin::BI__sync_fetch_and_add_4: 1619 case Builtin::BI__sync_fetch_and_add_8: 1620 case Builtin::BI__sync_fetch_and_add_16: 1621 case Builtin::BI__sync_fetch_and_sub: 1622 case Builtin::BI__sync_fetch_and_sub_1: 1623 case Builtin::BI__sync_fetch_and_sub_2: 1624 case Builtin::BI__sync_fetch_and_sub_4: 1625 case Builtin::BI__sync_fetch_and_sub_8: 1626 case Builtin::BI__sync_fetch_and_sub_16: 1627 case Builtin::BI__sync_fetch_and_or: 1628 case Builtin::BI__sync_fetch_and_or_1: 1629 case Builtin::BI__sync_fetch_and_or_2: 1630 case Builtin::BI__sync_fetch_and_or_4: 1631 case Builtin::BI__sync_fetch_and_or_8: 1632 case Builtin::BI__sync_fetch_and_or_16: 1633 case Builtin::BI__sync_fetch_and_and: 1634 case Builtin::BI__sync_fetch_and_and_1: 1635 case Builtin::BI__sync_fetch_and_and_2: 1636 case Builtin::BI__sync_fetch_and_and_4: 1637 case Builtin::BI__sync_fetch_and_and_8: 1638 case Builtin::BI__sync_fetch_and_and_16: 1639 case Builtin::BI__sync_fetch_and_xor: 1640 case Builtin::BI__sync_fetch_and_xor_1: 1641 case Builtin::BI__sync_fetch_and_xor_2: 1642 case Builtin::BI__sync_fetch_and_xor_4: 1643 case Builtin::BI__sync_fetch_and_xor_8: 1644 case Builtin::BI__sync_fetch_and_xor_16: 1645 case Builtin::BI__sync_fetch_and_nand: 1646 case Builtin::BI__sync_fetch_and_nand_1: 1647 case Builtin::BI__sync_fetch_and_nand_2: 1648 case Builtin::BI__sync_fetch_and_nand_4: 1649 case Builtin::BI__sync_fetch_and_nand_8: 1650 case Builtin::BI__sync_fetch_and_nand_16: 1651 case Builtin::BI__sync_add_and_fetch: 1652 case Builtin::BI__sync_add_and_fetch_1: 1653 case Builtin::BI__sync_add_and_fetch_2: 1654 case Builtin::BI__sync_add_and_fetch_4: 1655 case Builtin::BI__sync_add_and_fetch_8: 1656 case Builtin::BI__sync_add_and_fetch_16: 1657 case Builtin::BI__sync_sub_and_fetch: 1658 case Builtin::BI__sync_sub_and_fetch_1: 1659 case Builtin::BI__sync_sub_and_fetch_2: 1660 case Builtin::BI__sync_sub_and_fetch_4: 1661 case Builtin::BI__sync_sub_and_fetch_8: 1662 case Builtin::BI__sync_sub_and_fetch_16: 1663 case Builtin::BI__sync_and_and_fetch: 1664 case Builtin::BI__sync_and_and_fetch_1: 1665 case Builtin::BI__sync_and_and_fetch_2: 1666 case Builtin::BI__sync_and_and_fetch_4: 1667 case Builtin::BI__sync_and_and_fetch_8: 1668 case Builtin::BI__sync_and_and_fetch_16: 1669 case Builtin::BI__sync_or_and_fetch: 1670 case Builtin::BI__sync_or_and_fetch_1: 1671 case Builtin::BI__sync_or_and_fetch_2: 1672 case Builtin::BI__sync_or_and_fetch_4: 1673 case Builtin::BI__sync_or_and_fetch_8: 1674 case Builtin::BI__sync_or_and_fetch_16: 1675 case Builtin::BI__sync_xor_and_fetch: 1676 case Builtin::BI__sync_xor_and_fetch_1: 1677 case Builtin::BI__sync_xor_and_fetch_2: 1678 case Builtin::BI__sync_xor_and_fetch_4: 1679 case Builtin::BI__sync_xor_and_fetch_8: 1680 case Builtin::BI__sync_xor_and_fetch_16: 1681 case Builtin::BI__sync_nand_and_fetch: 1682 case Builtin::BI__sync_nand_and_fetch_1: 1683 case Builtin::BI__sync_nand_and_fetch_2: 1684 case Builtin::BI__sync_nand_and_fetch_4: 1685 case Builtin::BI__sync_nand_and_fetch_8: 1686 case Builtin::BI__sync_nand_and_fetch_16: 1687 case Builtin::BI__sync_val_compare_and_swap: 1688 case Builtin::BI__sync_val_compare_and_swap_1: 1689 case Builtin::BI__sync_val_compare_and_swap_2: 1690 case Builtin::BI__sync_val_compare_and_swap_4: 1691 case Builtin::BI__sync_val_compare_and_swap_8: 1692 case Builtin::BI__sync_val_compare_and_swap_16: 1693 case Builtin::BI__sync_bool_compare_and_swap: 1694 case 
Builtin::BI__sync_bool_compare_and_swap_1: 1695 case Builtin::BI__sync_bool_compare_and_swap_2: 1696 case Builtin::BI__sync_bool_compare_and_swap_4: 1697 case Builtin::BI__sync_bool_compare_and_swap_8: 1698 case Builtin::BI__sync_bool_compare_and_swap_16: 1699 case Builtin::BI__sync_lock_test_and_set: 1700 case Builtin::BI__sync_lock_test_and_set_1: 1701 case Builtin::BI__sync_lock_test_and_set_2: 1702 case Builtin::BI__sync_lock_test_and_set_4: 1703 case Builtin::BI__sync_lock_test_and_set_8: 1704 case Builtin::BI__sync_lock_test_and_set_16: 1705 case Builtin::BI__sync_lock_release: 1706 case Builtin::BI__sync_lock_release_1: 1707 case Builtin::BI__sync_lock_release_2: 1708 case Builtin::BI__sync_lock_release_4: 1709 case Builtin::BI__sync_lock_release_8: 1710 case Builtin::BI__sync_lock_release_16: 1711 case Builtin::BI__sync_swap: 1712 case Builtin::BI__sync_swap_1: 1713 case Builtin::BI__sync_swap_2: 1714 case Builtin::BI__sync_swap_4: 1715 case Builtin::BI__sync_swap_8: 1716 case Builtin::BI__sync_swap_16: 1717 return SemaBuiltinAtomicOverloaded(TheCallResult); 1718 case Builtin::BI__sync_synchronize: 1719 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst) 1720 << TheCall->getCallee()->getSourceRange(); 1721 break; 1722 case Builtin::BI__builtin_nontemporal_load: 1723 case Builtin::BI__builtin_nontemporal_store: 1724 return SemaBuiltinNontemporalOverloaded(TheCallResult); 1725 case Builtin::BI__builtin_memcpy_inline: { 1726 clang::Expr *SizeOp = TheCall->getArg(2); 1727 // We warn about copying to or from `nullptr` pointers when `size` is 1728 // greater than 0. When `size` is value dependent we cannot evaluate its 1729 // value so we bail out. 1730 if (SizeOp->isValueDependent()) 1731 break; 1732 if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) { 1733 CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc()); 1734 CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc()); 1735 } 1736 break; 1737 } 1738 #define BUILTIN(ID, TYPE, ATTRS) 1739 #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ 1740 case Builtin::BI##ID: \ 1741 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); 1742 #include "clang/Basic/Builtins.def" 1743 case Builtin::BI__annotation: 1744 if (SemaBuiltinMSVCAnnotation(*this, TheCall)) 1745 return ExprError(); 1746 break; 1747 case Builtin::BI__builtin_annotation: 1748 if (SemaBuiltinAnnotation(*this, TheCall)) 1749 return ExprError(); 1750 break; 1751 case Builtin::BI__builtin_addressof: 1752 if (SemaBuiltinAddressof(*this, TheCall)) 1753 return ExprError(); 1754 break; 1755 case Builtin::BI__builtin_is_aligned: 1756 case Builtin::BI__builtin_align_up: 1757 case Builtin::BI__builtin_align_down: 1758 if (SemaBuiltinAlignment(*this, TheCall, BuiltinID)) 1759 return ExprError(); 1760 break; 1761 case Builtin::BI__builtin_add_overflow: 1762 case Builtin::BI__builtin_sub_overflow: 1763 case Builtin::BI__builtin_mul_overflow: 1764 if (SemaBuiltinOverflow(*this, TheCall, BuiltinID)) 1765 return ExprError(); 1766 break; 1767 case Builtin::BI__builtin_operator_new: 1768 case Builtin::BI__builtin_operator_delete: { 1769 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete; 1770 ExprResult Res = 1771 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete); 1772 if (Res.isInvalid()) 1773 CorrectDelayedTyposInExpr(TheCallResult.get()); 1774 return Res; 1775 } 1776 case Builtin::BI__builtin_dump_struct: { 1777 // We first want to ensure we are called with 2 arguments 1778 if (checkArgCount(*this, TheCall, 
2)) 1779 return ExprError(); 1780 // Ensure that the first argument is of type 'struct XX *' 1781 const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts(); 1782 const QualType PtrArgType = PtrArg->getType(); 1783 if (!PtrArgType->isPointerType() || 1784 !PtrArgType->getPointeeType()->isRecordType()) { 1785 Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1786 << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType 1787 << "structure pointer"; 1788 return ExprError(); 1789 } 1790 1791 // Ensure that the second argument is of type 'FunctionType' 1792 const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts(); 1793 const QualType FnPtrArgType = FnPtrArg->getType(); 1794 if (!FnPtrArgType->isPointerType()) { 1795 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1796 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1797 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1798 return ExprError(); 1799 } 1800 1801 const auto *FuncType = 1802 FnPtrArgType->getPointeeType()->getAs<FunctionType>(); 1803 1804 if (!FuncType) { 1805 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1806 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2 1807 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1808 return ExprError(); 1809 } 1810 1811 if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) { 1812 if (!FT->getNumParams()) { 1813 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1814 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1815 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1816 return ExprError(); 1817 } 1818 QualType PT = FT->getParamType(0); 1819 if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy || 1820 !PT->isPointerType() || !PT->getPointeeType()->isCharType() || 1821 !PT->getPointeeType().isConstQualified()) { 1822 Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 1823 << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 1824 << 2 << FnPtrArgType << "'int (*)(const char *, ...)'"; 1825 return ExprError(); 1826 } 1827 } 1828 1829 TheCall->setType(Context.IntTy); 1830 break; 1831 } 1832 case Builtin::BI__builtin_expect_with_probability: { 1833 // We first want to ensure we are called with 3 arguments 1834 if (checkArgCount(*this, TheCall, 3)) 1835 return ExprError(); 1836 // then check probability is constant float in range [0.0, 1.0] 1837 const Expr *ProbArg = TheCall->getArg(2); 1838 SmallVector<PartialDiagnosticAt, 8> Notes; 1839 Expr::EvalResult Eval; 1840 Eval.Diag = &Notes; 1841 if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) || 1842 !Eval.Val.isFloat()) { 1843 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float) 1844 << ProbArg->getSourceRange(); 1845 for (const PartialDiagnosticAt &PDiag : Notes) 1846 Diag(PDiag.first, PDiag.second); 1847 return ExprError(); 1848 } 1849 llvm::APFloat Probability = Eval.Val.getFloat(); 1850 bool LoseInfo = false; 1851 Probability.convert(llvm::APFloat::IEEEdouble(), 1852 llvm::RoundingMode::Dynamic, &LoseInfo); 1853 if (!(Probability >= llvm::APFloat(0.0) && 1854 Probability <= llvm::APFloat(1.0))) { 1855 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range) 1856 << ProbArg->getSourceRange(); 1857 return ExprError(); 1858 } 1859 break; 1860 } 1861 case Builtin::BI__builtin_preserve_access_index: 1862 if (SemaBuiltinPreserveAI(*this, TheCall)) 1863 return ExprError(); 1864 break; 1865 case 
Builtin::BI__builtin_call_with_static_chain: 1866 if (SemaBuiltinCallWithStaticChain(*this, TheCall)) 1867 return ExprError(); 1868 break; 1869 case Builtin::BI__exception_code: 1870 case Builtin::BI_exception_code: 1871 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope, 1872 diag::err_seh___except_block)) 1873 return ExprError(); 1874 break; 1875 case Builtin::BI__exception_info: 1876 case Builtin::BI_exception_info: 1877 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope, 1878 diag::err_seh___except_filter)) 1879 return ExprError(); 1880 break; 1881 case Builtin::BI__GetExceptionInfo: 1882 if (checkArgCount(*this, TheCall, 1)) 1883 return ExprError(); 1884 1885 if (CheckCXXThrowOperand( 1886 TheCall->getBeginLoc(), 1887 Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()), 1888 TheCall)) 1889 return ExprError(); 1890 1891 TheCall->setType(Context.VoidPtrTy); 1892 break; 1893 // OpenCL v2.0, s6.13.16 - Pipe functions 1894 case Builtin::BIread_pipe: 1895 case Builtin::BIwrite_pipe: 1896 // Since those two functions are declared with var args, we need a semantic 1897 // check for the argument. 1898 if (SemaBuiltinRWPipe(*this, TheCall)) 1899 return ExprError(); 1900 break; 1901 case Builtin::BIreserve_read_pipe: 1902 case Builtin::BIreserve_write_pipe: 1903 case Builtin::BIwork_group_reserve_read_pipe: 1904 case Builtin::BIwork_group_reserve_write_pipe: 1905 if (SemaBuiltinReserveRWPipe(*this, TheCall)) 1906 return ExprError(); 1907 break; 1908 case Builtin::BIsub_group_reserve_read_pipe: 1909 case Builtin::BIsub_group_reserve_write_pipe: 1910 if (checkOpenCLSubgroupExt(*this, TheCall) || 1911 SemaBuiltinReserveRWPipe(*this, TheCall)) 1912 return ExprError(); 1913 break; 1914 case Builtin::BIcommit_read_pipe: 1915 case Builtin::BIcommit_write_pipe: 1916 case Builtin::BIwork_group_commit_read_pipe: 1917 case Builtin::BIwork_group_commit_write_pipe: 1918 if (SemaBuiltinCommitRWPipe(*this, TheCall)) 1919 return ExprError(); 1920 break; 1921 case Builtin::BIsub_group_commit_read_pipe: 1922 case Builtin::BIsub_group_commit_write_pipe: 1923 if (checkOpenCLSubgroupExt(*this, TheCall) || 1924 SemaBuiltinCommitRWPipe(*this, TheCall)) 1925 return ExprError(); 1926 break; 1927 case Builtin::BIget_pipe_num_packets: 1928 case Builtin::BIget_pipe_max_packets: 1929 if (SemaBuiltinPipePackets(*this, TheCall)) 1930 return ExprError(); 1931 break; 1932 case Builtin::BIto_global: 1933 case Builtin::BIto_local: 1934 case Builtin::BIto_private: 1935 if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall)) 1936 return ExprError(); 1937 break; 1938 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions. 
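  // As with the pipe builtins above, these functions are declared in a way
  // that ordinary prototype checking cannot fully validate, so the
  // SemaOpenCLBuiltin* helpers below perform the per-argument semantic checks.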
1939 case Builtin::BIenqueue_kernel: 1940 if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall)) 1941 return ExprError(); 1942 break; 1943 case Builtin::BIget_kernel_work_group_size: 1944 case Builtin::BIget_kernel_preferred_work_group_size_multiple: 1945 if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall)) 1946 return ExprError(); 1947 break; 1948 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange: 1949 case Builtin::BIget_kernel_sub_group_count_for_ndrange: 1950 if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall)) 1951 return ExprError(); 1952 break; 1953 case Builtin::BI__builtin_os_log_format: 1954 Cleanup.setExprNeedsCleanups(true); 1955 LLVM_FALLTHROUGH; 1956 case Builtin::BI__builtin_os_log_format_buffer_size: 1957 if (SemaBuiltinOSLogFormat(TheCall)) 1958 return ExprError(); 1959 break; 1960 case Builtin::BI__builtin_frame_address: 1961 case Builtin::BI__builtin_return_address: { 1962 if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF)) 1963 return ExprError(); 1964 1965 // -Wframe-address warning if non-zero passed to builtin 1966 // return/frame address. 1967 Expr::EvalResult Result; 1968 if (!TheCall->getArg(0)->isValueDependent() && 1969 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) && 1970 Result.Val.getInt() != 0) 1971 Diag(TheCall->getBeginLoc(), diag::warn_frame_address) 1972 << ((BuiltinID == Builtin::BI__builtin_return_address) 1973 ? "__builtin_return_address" 1974 : "__builtin_frame_address") 1975 << TheCall->getSourceRange(); 1976 break; 1977 } 1978 1979 case Builtin::BI__builtin_matrix_transpose: 1980 return SemaBuiltinMatrixTranspose(TheCall, TheCallResult); 1981 1982 case Builtin::BI__builtin_matrix_column_major_load: 1983 return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult); 1984 1985 case Builtin::BI__builtin_matrix_column_major_store: 1986 return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult); 1987 1988 case Builtin::BI__builtin_get_device_side_mangled_name: { 1989 auto Check = [](CallExpr *TheCall) { 1990 if (TheCall->getNumArgs() != 1) 1991 return false; 1992 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts()); 1993 if (!DRE) 1994 return false; 1995 auto *D = DRE->getDecl(); 1996 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) 1997 return false; 1998 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() || 1999 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>(); 2000 }; 2001 if (!Check(TheCall)) { 2002 Diag(TheCall->getBeginLoc(), 2003 diag::err_hip_invalid_args_builtin_mangled_name); 2004 return ExprError(); 2005 } 2006 } 2007 } 2008 2009 // Since the target specific builtins for each arch overlap, only check those 2010 // of the arch we are compiling for. 2011 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) { 2012 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) { 2013 assert(Context.getAuxTargetInfo() && 2014 "Aux Target Builtin, but not an aux target?"); 2015 2016 if (CheckTSBuiltinFunctionCall( 2017 *Context.getAuxTargetInfo(), 2018 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall)) 2019 return ExprError(); 2020 } else { 2021 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID, 2022 TheCall)) 2023 return ExprError(); 2024 } 2025 } 2026 2027 return TheCallResult; 2028 } 2029 2030 // Get the valid immediate range for the specified NEON type code. 2031 static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) { 2032 NeonTypeFlags Type(t); 2033 int IsQuad = ForceQuad ? 
true : Type.isQuad(); 2034 switch (Type.getEltType()) { 2035 case NeonTypeFlags::Int8: 2036 case NeonTypeFlags::Poly8: 2037 return shift ? 7 : (8 << IsQuad) - 1; 2038 case NeonTypeFlags::Int16: 2039 case NeonTypeFlags::Poly16: 2040 return shift ? 15 : (4 << IsQuad) - 1; 2041 case NeonTypeFlags::Int32: 2042 return shift ? 31 : (2 << IsQuad) - 1; 2043 case NeonTypeFlags::Int64: 2044 case NeonTypeFlags::Poly64: 2045 return shift ? 63 : (1 << IsQuad) - 1; 2046 case NeonTypeFlags::Poly128: 2047 return shift ? 127 : (1 << IsQuad) - 1; 2048 case NeonTypeFlags::Float16: 2049 assert(!shift && "cannot shift float types!"); 2050 return (4 << IsQuad) - 1; 2051 case NeonTypeFlags::Float32: 2052 assert(!shift && "cannot shift float types!"); 2053 return (2 << IsQuad) - 1; 2054 case NeonTypeFlags::Float64: 2055 assert(!shift && "cannot shift float types!"); 2056 return (1 << IsQuad) - 1; 2057 case NeonTypeFlags::BFloat16: 2058 assert(!shift && "cannot shift float types!"); 2059 return (4 << IsQuad) - 1; 2060 } 2061 llvm_unreachable("Invalid NeonTypeFlag!"); 2062 } 2063 2064 /// getNeonEltType - Return the QualType corresponding to the elements of 2065 /// the vector type specified by the NeonTypeFlags. This is used to check 2066 /// the pointer arguments for Neon load/store intrinsics. 2067 static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2068 bool IsPolyUnsigned, bool IsInt64Long) { 2069 switch (Flags.getEltType()) { 2070 case NeonTypeFlags::Int8: 2071 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2072 case NeonTypeFlags::Int16: 2073 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2074 case NeonTypeFlags::Int32: 2075 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2076 case NeonTypeFlags::Int64: 2077 if (IsInt64Long) 2078 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2079 else 2080 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2081 : Context.LongLongTy; 2082 case NeonTypeFlags::Poly8: 2083 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2084 case NeonTypeFlags::Poly16: 2085 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; 2086 case NeonTypeFlags::Poly64: 2087 if (IsInt64Long) 2088 return Context.UnsignedLongTy; 2089 else 2090 return Context.UnsignedLongLongTy; 2091 case NeonTypeFlags::Poly128: 2092 break; 2093 case NeonTypeFlags::Float16: 2094 return Context.HalfTy; 2095 case NeonTypeFlags::Float32: 2096 return Context.FloatTy; 2097 case NeonTypeFlags::Float64: 2098 return Context.DoubleTy; 2099 case NeonTypeFlags::BFloat16: 2100 return Context.BFloat16Ty; 2101 } 2102 llvm_unreachable("Invalid NeonTypeFlag!"); 2103 } 2104 2105 bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2106 // Range check SVE intrinsics that take immediate values. 2107 SmallVector<std::tuple<int,int,int>, 3> ImmChecks; 2108 2109 switch (BuiltinID) { 2110 default: 2111 return false; 2112 #define GET_SVE_IMMEDIATE_CHECK 2113 #include "clang/Basic/arm_sve_sema_rangechecks.inc" 2114 #undef GET_SVE_IMMEDIATE_CHECK 2115 } 2116 2117 // Perform all the immediate checks for this builtin call. 2118 bool HasError = false; 2119 for (auto &I : ImmChecks) { 2120 int ArgNum, CheckTy, ElementSizeInBits; 2121 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; 2122 2123 typedef bool(*OptionSetCheckFnTy)(int64_t Value); 2124 2125 // Function that checks whether the operand (ArgNum) is an immediate 2126 // that is one of the predefined values. 
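    // For example, ImmCheckComplexRot90_270 below accepts only 90 or 270, and
    // ImmCheckComplexRotAll90 accepts only 0, 90, 180 or 270; any other
    // constant is reported with the rotation diagnostic passed in as ErrDiag.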
2127 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm, 2128 int ErrDiag) -> bool { 2129 // We can't check the value of a dependent argument. 2130 Expr *Arg = TheCall->getArg(ArgNum); 2131 if (Arg->isTypeDependent() || Arg->isValueDependent()) 2132 return false; 2133 2134 // Check constant-ness first. 2135 llvm::APSInt Imm; 2136 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm)) 2137 return true; 2138 2139 if (!CheckImm(Imm.getSExtValue())) 2140 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange(); 2141 return false; 2142 }; 2143 2144 switch ((SVETypeFlags::ImmCheckType)CheckTy) { 2145 case SVETypeFlags::ImmCheck0_31: 2146 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31)) 2147 HasError = true; 2148 break; 2149 case SVETypeFlags::ImmCheck0_13: 2150 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13)) 2151 HasError = true; 2152 break; 2153 case SVETypeFlags::ImmCheck1_16: 2154 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16)) 2155 HasError = true; 2156 break; 2157 case SVETypeFlags::ImmCheck0_7: 2158 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7)) 2159 HasError = true; 2160 break; 2161 case SVETypeFlags::ImmCheckExtract: 2162 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2163 (2048 / ElementSizeInBits) - 1)) 2164 HasError = true; 2165 break; 2166 case SVETypeFlags::ImmCheckShiftRight: 2167 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits)) 2168 HasError = true; 2169 break; 2170 case SVETypeFlags::ImmCheckShiftRightNarrow: 2171 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 2172 ElementSizeInBits / 2)) 2173 HasError = true; 2174 break; 2175 case SVETypeFlags::ImmCheckShiftLeft: 2176 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2177 ElementSizeInBits - 1)) 2178 HasError = true; 2179 break; 2180 case SVETypeFlags::ImmCheckLaneIndex: 2181 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2182 (128 / (1 * ElementSizeInBits)) - 1)) 2183 HasError = true; 2184 break; 2185 case SVETypeFlags::ImmCheckLaneIndexCompRotate: 2186 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2187 (128 / (2 * ElementSizeInBits)) - 1)) 2188 HasError = true; 2189 break; 2190 case SVETypeFlags::ImmCheckLaneIndexDot: 2191 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2192 (128 / (4 * ElementSizeInBits)) - 1)) 2193 HasError = true; 2194 break; 2195 case SVETypeFlags::ImmCheckComplexRot90_270: 2196 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; }, 2197 diag::err_rotation_argument_to_cadd)) 2198 HasError = true; 2199 break; 2200 case SVETypeFlags::ImmCheckComplexRotAll90: 2201 if (CheckImmediateInSet( 2202 [](int64_t V) { 2203 return V == 0 || V == 90 || V == 180 || V == 270; 2204 }, 2205 diag::err_rotation_argument_to_cmla)) 2206 HasError = true; 2207 break; 2208 case SVETypeFlags::ImmCheck0_1: 2209 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1)) 2210 HasError = true; 2211 break; 2212 case SVETypeFlags::ImmCheck0_2: 2213 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2)) 2214 HasError = true; 2215 break; 2216 case SVETypeFlags::ImmCheck0_3: 2217 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3)) 2218 HasError = true; 2219 break; 2220 } 2221 } 2222 2223 return HasError; 2224 } 2225 2226 bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI, 2227 unsigned BuiltinID, CallExpr *TheCall) { 2228 llvm::APSInt Result; 2229 uint64_t mask = 0; 2230 unsigned TV = 0; 2231 int PtrArgNum = -1; 2232 bool HasConstPtr = false; 2233 switch (BuiltinID) { 2234 #define GET_NEON_OVERLOAD_CHECK 2235 
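// Note: the GET_NEON_OVERLOAD_CHECK region of these generated headers is
// expected to expand to case labels that fill in 'mask' (and, where relevant,
// PtrArgNum and HasConstPtr) for each overloaded NEON/FP16 intrinsic; the
// element-type immediate is then validated against that mask below.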
#include "clang/Basic/arm_neon.inc" 2236 #include "clang/Basic/arm_fp16.inc" 2237 #undef GET_NEON_OVERLOAD_CHECK 2238 } 2239 2240 // For NEON intrinsics which are overloaded on vector element type, validate 2241 // the immediate which specifies which variant to emit. 2242 unsigned ImmArg = TheCall->getNumArgs()-1; 2243 if (mask) { 2244 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) 2245 return true; 2246 2247 TV = Result.getLimitedValue(64); 2248 if ((TV > 63) || (mask & (1ULL << TV)) == 0) 2249 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code) 2250 << TheCall->getArg(ImmArg)->getSourceRange(); 2251 } 2252 2253 if (PtrArgNum >= 0) { 2254 // Check that pointer arguments have the specified type. 2255 Expr *Arg = TheCall->getArg(PtrArgNum); 2256 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) 2257 Arg = ICE->getSubExpr(); 2258 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); 2259 QualType RHSTy = RHS.get()->getType(); 2260 2261 llvm::Triple::ArchType Arch = TI.getTriple().getArch(); 2262 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 || 2263 Arch == llvm::Triple::aarch64_32 || 2264 Arch == llvm::Triple::aarch64_be; 2265 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong; 2266 QualType EltTy = 2267 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); 2268 if (HasConstPtr) 2269 EltTy = EltTy.withConst(); 2270 QualType LHSTy = Context.getPointerType(EltTy); 2271 AssignConvertType ConvTy; 2272 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); 2273 if (RHS.isInvalid()) 2274 return true; 2275 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy, 2276 RHS.get(), AA_Assigning)) 2277 return true; 2278 } 2279 2280 // For NEON intrinsics which take an immediate value as part of the 2281 // instruction, range check them here. 2282 unsigned i = 0, l = 0, u = 0; 2283 switch (BuiltinID) { 2284 default: 2285 return false; 2286 #define GET_NEON_IMMEDIATE_CHECK 2287 #include "clang/Basic/arm_neon.inc" 2288 #include "clang/Basic/arm_fp16.inc" 2289 #undef GET_NEON_IMMEDIATE_CHECK 2290 } 2291 2292 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2293 } 2294 2295 bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { 2296 switch (BuiltinID) { 2297 default: 2298 return false; 2299 #include "clang/Basic/arm_mve_builtin_sema.inc" 2300 } 2301 } 2302 2303 bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2304 CallExpr *TheCall) { 2305 bool Err = false; 2306 switch (BuiltinID) { 2307 default: 2308 return false; 2309 #include "clang/Basic/arm_cde_builtin_sema.inc" 2310 } 2311 2312 if (Err) 2313 return true; 2314 2315 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true); 2316 } 2317 2318 bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI, 2319 const Expr *CoprocArg, bool WantCDE) { 2320 if (isConstantEvaluated()) 2321 return false; 2322 2323 // We can't check the value of a dependent argument. 
2324 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent()) 2325 return false; 2326 2327 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context); 2328 int64_t CoprocNo = CoprocNoAP.getExtValue(); 2329 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative"); 2330 2331 uint32_t CDECoprocMask = TI.getARMCDECoprocMask(); 2332 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo)); 2333 2334 if (IsCDECoproc != WantCDE) 2335 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc) 2336 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange(); 2337 2338 return false; 2339 } 2340 2341 bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, 2342 unsigned MaxWidth) { 2343 assert((BuiltinID == ARM::BI__builtin_arm_ldrex || 2344 BuiltinID == ARM::BI__builtin_arm_ldaex || 2345 BuiltinID == ARM::BI__builtin_arm_strex || 2346 BuiltinID == ARM::BI__builtin_arm_stlex || 2347 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2348 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2349 BuiltinID == AArch64::BI__builtin_arm_strex || 2350 BuiltinID == AArch64::BI__builtin_arm_stlex) && 2351 "unexpected ARM builtin"); 2352 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || 2353 BuiltinID == ARM::BI__builtin_arm_ldaex || 2354 BuiltinID == AArch64::BI__builtin_arm_ldrex || 2355 BuiltinID == AArch64::BI__builtin_arm_ldaex; 2356 2357 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 2358 2359 // Ensure that we have the proper number of arguments. 2360 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) 2361 return true; 2362 2363 // Inspect the pointer argument of the atomic builtin. This should always be 2364 // a pointer type, whose element is an integral scalar or pointer type. 2365 // Because it is a pointer type, we don't have to worry about any implicit 2366 // casts here. 2367 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); 2368 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); 2369 if (PointerArgRes.isInvalid()) 2370 return true; 2371 PointerArg = PointerArgRes.get(); 2372 2373 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 2374 if (!pointerType) { 2375 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 2376 << PointerArg->getType() << PointerArg->getSourceRange(); 2377 return true; 2378 } 2379 2380 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next 2381 // task is to insert the appropriate casts into the AST. First work out just 2382 // what the appropriate type is. 2383 QualType ValType = pointerType->getPointeeType(); 2384 QualType AddrType = ValType.getUnqualifiedType().withVolatile(); 2385 if (IsLdrex) 2386 AddrType.addConst(); 2387 2388 // Issue a warning if the cast is dodgy. 2389 CastKind CastNeeded = CK_NoOp; 2390 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { 2391 CastNeeded = CK_BitCast; 2392 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers) 2393 << PointerArg->getType() << Context.getPointerType(AddrType) 2394 << AA_Passing << PointerArg->getSourceRange(); 2395 } 2396 2397 // Finally, do the cast and replace the argument with the corrected version. 2398 AddrType = Context.getPointerType(AddrType); 2399 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); 2400 if (PointerArgRes.isInvalid()) 2401 return true; 2402 PointerArg = PointerArgRes.get(); 2403 2404 TheCall->setArg(IsLdrex ? 
0 : 1, PointerArg); 2405 2406 // In general, we allow ints, floats and pointers to be loaded and stored. 2407 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 2408 !ValType->isBlockPointerType() && !ValType->isFloatingType()) { 2409 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr) 2410 << PointerArg->getType() << PointerArg->getSourceRange(); 2411 return true; 2412 } 2413 2414 // But ARM doesn't have instructions to deal with 128-bit versions. 2415 if (Context.getTypeSize(ValType) > MaxWidth) { 2416 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); 2417 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size) 2418 << PointerArg->getType() << PointerArg->getSourceRange(); 2419 return true; 2420 } 2421 2422 switch (ValType.getObjCLifetime()) { 2423 case Qualifiers::OCL_None: 2424 case Qualifiers::OCL_ExplicitNone: 2425 // okay 2426 break; 2427 2428 case Qualifiers::OCL_Weak: 2429 case Qualifiers::OCL_Strong: 2430 case Qualifiers::OCL_Autoreleasing: 2431 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 2432 << ValType << PointerArg->getSourceRange(); 2433 return true; 2434 } 2435 2436 if (IsLdrex) { 2437 TheCall->setType(ValType); 2438 return false; 2439 } 2440 2441 // Initialize the argument to be stored. 2442 ExprResult ValArg = TheCall->getArg(0); 2443 InitializedEntity Entity = InitializedEntity::InitializeParameter( 2444 Context, ValType, /*consume*/ false); 2445 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 2446 if (ValArg.isInvalid()) 2447 return true; 2448 TheCall->setArg(0, ValArg.get()); 2449 2450 // __builtin_arm_strex always returns an int. It's marked as such in the .def, 2451 // but the custom checker bypasses all default analysis. 2452 TheCall->setType(Context.IntTy); 2453 return false; 2454 } 2455 2456 bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 2457 CallExpr *TheCall) { 2458 if (BuiltinID == ARM::BI__builtin_arm_ldrex || 2459 BuiltinID == ARM::BI__builtin_arm_ldaex || 2460 BuiltinID == ARM::BI__builtin_arm_strex || 2461 BuiltinID == ARM::BI__builtin_arm_stlex) { 2462 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); 2463 } 2464 2465 if (BuiltinID == ARM::BI__builtin_arm_prefetch) { 2466 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2467 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); 2468 } 2469 2470 if (BuiltinID == ARM::BI__builtin_arm_rsr64 || 2471 BuiltinID == ARM::BI__builtin_arm_wsr64) 2472 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); 2473 2474 if (BuiltinID == ARM::BI__builtin_arm_rsr || 2475 BuiltinID == ARM::BI__builtin_arm_rsrp || 2476 BuiltinID == ARM::BI__builtin_arm_wsr || 2477 BuiltinID == ARM::BI__builtin_arm_wsrp) 2478 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2479 2480 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2481 return true; 2482 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall)) 2483 return true; 2484 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2485 return true; 2486 2487 // For intrinsics which take an immediate value as part of the instruction, 2488 // range check them here. 2489 // FIXME: VFP Intrinsics should error if VFP not present. 
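  // For example, __builtin_arm_ssat requires its saturation bit position to be
  // an integer constant in [1, 32], while __builtin_arm_usat requires one in
  // [0, 31]; a constant outside the range (or a non-constant argument) is
  // rejected by SemaBuiltinConstantArgRange in the corresponding case below.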
2490 switch (BuiltinID) { 2491 default: return false; 2492 case ARM::BI__builtin_arm_ssat: 2493 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); 2494 case ARM::BI__builtin_arm_usat: 2495 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31); 2496 case ARM::BI__builtin_arm_ssat16: 2497 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 2498 case ARM::BI__builtin_arm_usat16: 2499 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 2500 case ARM::BI__builtin_arm_vcvtr_f: 2501 case ARM::BI__builtin_arm_vcvtr_d: 2502 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 2503 case ARM::BI__builtin_arm_dmb: 2504 case ARM::BI__builtin_arm_dsb: 2505 case ARM::BI__builtin_arm_isb: 2506 case ARM::BI__builtin_arm_dbg: 2507 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15); 2508 case ARM::BI__builtin_arm_cdp: 2509 case ARM::BI__builtin_arm_cdp2: 2510 case ARM::BI__builtin_arm_mcr: 2511 case ARM::BI__builtin_arm_mcr2: 2512 case ARM::BI__builtin_arm_mrc: 2513 case ARM::BI__builtin_arm_mrc2: 2514 case ARM::BI__builtin_arm_mcrr: 2515 case ARM::BI__builtin_arm_mcrr2: 2516 case ARM::BI__builtin_arm_mrrc: 2517 case ARM::BI__builtin_arm_mrrc2: 2518 case ARM::BI__builtin_arm_ldc: 2519 case ARM::BI__builtin_arm_ldcl: 2520 case ARM::BI__builtin_arm_ldc2: 2521 case ARM::BI__builtin_arm_ldc2l: 2522 case ARM::BI__builtin_arm_stc: 2523 case ARM::BI__builtin_arm_stcl: 2524 case ARM::BI__builtin_arm_stc2: 2525 case ARM::BI__builtin_arm_stc2l: 2526 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) || 2527 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), 2528 /*WantCDE*/ false); 2529 } 2530 } 2531 2532 bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, 2533 unsigned BuiltinID, 2534 CallExpr *TheCall) { 2535 if (BuiltinID == AArch64::BI__builtin_arm_ldrex || 2536 BuiltinID == AArch64::BI__builtin_arm_ldaex || 2537 BuiltinID == AArch64::BI__builtin_arm_strex || 2538 BuiltinID == AArch64::BI__builtin_arm_stlex) { 2539 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); 2540 } 2541 2542 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { 2543 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 2544 SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || 2545 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || 2546 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); 2547 } 2548 2549 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || 2550 BuiltinID == AArch64::BI__builtin_arm_wsr64) 2551 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2552 2553 // Memory Tagging Extensions (MTE) Intrinsics 2554 if (BuiltinID == AArch64::BI__builtin_arm_irg || 2555 BuiltinID == AArch64::BI__builtin_arm_addg || 2556 BuiltinID == AArch64::BI__builtin_arm_gmi || 2557 BuiltinID == AArch64::BI__builtin_arm_ldg || 2558 BuiltinID == AArch64::BI__builtin_arm_stg || 2559 BuiltinID == AArch64::BI__builtin_arm_subp) { 2560 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall); 2561 } 2562 2563 if (BuiltinID == AArch64::BI__builtin_arm_rsr || 2564 BuiltinID == AArch64::BI__builtin_arm_rsrp || 2565 BuiltinID == AArch64::BI__builtin_arm_wsr || 2566 BuiltinID == AArch64::BI__builtin_arm_wsrp) 2567 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); 2568 2569 // Only check the valid encoding range. Any constant in this range would be 2570 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw 2571 // an exception for incorrect registers. This matches MSVC behavior. 
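  // For example, _ReadStatusReg(0x8000) is rejected because the encoding must
  // be a constant in [0, 0x7fff], while any constant inside that range is
  // accepted here even if it does not name a real register, as described
  // above.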
2572 if (BuiltinID == AArch64::BI_ReadStatusReg || 2573 BuiltinID == AArch64::BI_WriteStatusReg) 2574 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 2575 2576 if (BuiltinID == AArch64::BI__getReg) 2577 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 2578 2579 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 2580 return true; 2581 2582 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 2583 return true; 2584 2585 // For intrinsics which take an immediate value as part of the instruction, 2586 // range check them here. 2587 unsigned i = 0, l = 0, u = 0; 2588 switch (BuiltinID) { 2589 default: return false; 2590 case AArch64::BI__builtin_arm_dmb: 2591 case AArch64::BI__builtin_arm_dsb: 2592 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 2593 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 2594 } 2595 2596 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 2597 } 2598 2599 static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 2600 if (Arg->getType()->getAsPlaceholderType()) 2601 return false; 2602 2603 // The first argument needs to be a record field access. 2604 // If it is an array element access, we delay decision 2605 // to BPF backend to check whether the access is a 2606 // field access or not. 2607 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 2608 dyn_cast<MemberExpr>(Arg->IgnoreParens()) || 2609 dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens())); 2610 } 2611 2612 static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S, 2613 QualType VectorTy, QualType EltTy) { 2614 QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType(); 2615 if (!Context.hasSameType(VectorEltTy, EltTy)) { 2616 S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types) 2617 << Call->getSourceRange() << VectorEltTy << EltTy; 2618 return false; 2619 } 2620 return true; 2621 } 2622 2623 static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 2624 QualType ArgType = Arg->getType(); 2625 if (ArgType->getAsPlaceholderType()) 2626 return false; 2627 2628 // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type 2629 // format: 2630 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 2631 // 2. <type> var; 2632 // __builtin_preserve_type_info(var, flag); 2633 if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) && 2634 !dyn_cast<UnaryOperator>(Arg->IgnoreParens())) 2635 return false; 2636 2637 // Typedef type. 2638 if (ArgType->getAs<TypedefType>()) 2639 return true; 2640 2641 // Record type or Enum type. 
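  // Only named record and enum types qualify: the code below looks through
  // type sugar with getUnqualifiedDesugaredType() and rejects anonymous
  // records and enums (those whose declaration name is empty).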
2642 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2643 if (const auto *RT = Ty->getAs<RecordType>()) { 2644 if (!RT->getDecl()->getDeclName().isEmpty()) 2645 return true; 2646 } else if (const auto *ET = Ty->getAs<EnumType>()) { 2647 if (!ET->getDecl()->getDeclName().isEmpty()) 2648 return true; 2649 } 2650 2651 return false; 2652 } 2653 2654 static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 2655 QualType ArgType = Arg->getType(); 2656 if (ArgType->getAsPlaceholderType()) 2657 return false; 2658 2659 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 2660 // format: 2661 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 2662 // flag); 2663 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 2664 if (!UO) 2665 return false; 2666 2667 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 2668 if (!CE) 2669 return false; 2670 if (CE->getCastKind() != CK_IntegralToPointer && 2671 CE->getCastKind() != CK_NullToPointer) 2672 return false; 2673 2674 // The integer must be from an EnumConstantDecl. 2675 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 2676 if (!DR) 2677 return false; 2678 2679 const EnumConstantDecl *Enumerator = 2680 dyn_cast<EnumConstantDecl>(DR->getDecl()); 2681 if (!Enumerator) 2682 return false; 2683 2684 // The type must be EnumType. 2685 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 2686 const auto *ET = Ty->getAs<EnumType>(); 2687 if (!ET) 2688 return false; 2689 2690 // The enum value must be supported. 2691 for (auto *EDI : ET->getDecl()->enumerators()) { 2692 if (EDI == Enumerator) 2693 return true; 2694 } 2695 2696 return false; 2697 } 2698 2699 bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 2700 CallExpr *TheCall) { 2701 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 2702 BuiltinID == BPF::BI__builtin_btf_type_id || 2703 BuiltinID == BPF::BI__builtin_preserve_type_info || 2704 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 2705 "unexpected BPF builtin"); 2706 2707 if (checkArgCount(*this, TheCall, 2)) 2708 return true; 2709 2710 // The second argument needs to be a constant int 2711 Expr *Arg = TheCall->getArg(1); 2712 Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 2713 diag::kind kind; 2714 if (!Value) { 2715 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 2716 kind = diag::err_preserve_field_info_not_const; 2717 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 2718 kind = diag::err_btf_type_id_not_const; 2719 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 2720 kind = diag::err_preserve_type_info_not_const; 2721 else 2722 kind = diag::err_preserve_enum_value_not_const; 2723 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 2724 return true; 2725 } 2726 2727 // The first argument 2728 Arg = TheCall->getArg(0); 2729 bool InvalidArg = false; 2730 bool ReturnUnsignedInt = true; 2731 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 2732 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 2733 InvalidArg = true; 2734 kind = diag::err_preserve_field_info_not_field; 2735 } 2736 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 2737 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 2738 InvalidArg = true; 2739 kind = diag::err_preserve_type_info_invalid; 2740 } 2741 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 2742 if (!isValidBPFPreserveEnumValueArg(Arg)) { 2743 InvalidArg = true; 2744 kind = diag::err_preserve_enum_value_invalid; 2745 } 2746 ReturnUnsignedInt = false; 2747 } else if (BuiltinID 
== BPF::BI__builtin_btf_type_id) { 2748 ReturnUnsignedInt = false; 2749 } 2750 2751 if (InvalidArg) { 2752 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 2753 return true; 2754 } 2755 2756 if (ReturnUnsignedInt) 2757 TheCall->setType(Context.UnsignedIntTy); 2758 else 2759 TheCall->setType(Context.UnsignedLongTy); 2760 return false; 2761 } 2762 2763 bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 2764 struct ArgInfo { 2765 uint8_t OpNum; 2766 bool IsSigned; 2767 uint8_t BitWidth; 2768 uint8_t Align; 2769 }; 2770 struct BuiltinInfo { 2771 unsigned BuiltinID; 2772 ArgInfo Infos[2]; 2773 }; 2774 2775 static BuiltinInfo Infos[] = { 2776 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 2777 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 2778 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 2779 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 2780 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 2781 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 2782 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 2783 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 2784 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 2785 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 2786 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 2787 2788 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 2789 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 2790 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 2791 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 2792 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 2793 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 2794 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 2795 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 2796 { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 2797 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 2798 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 2799 2800 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 2801 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 2802 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 2803 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 2804 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 2805 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 2806 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 2807 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 2808 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 2809 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 2810 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 2811 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 2812 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 2813 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 2814 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 2815 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 2816 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 2817 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 2818 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, 
false, 7, 0 }} }, 2819 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 2820 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 2821 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 2822 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 2823 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 2824 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 2825 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 2826 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 2827 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 2828 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 2829 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 2830 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 2831 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 2832 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 2833 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 2834 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 2835 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 2836 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 2837 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 2838 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 2839 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 2840 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 2841 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 2842 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 2843 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 2844 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 2845 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 2846 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 2847 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 2848 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 2849 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 2850 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, 2851 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 2852 {{ 1, false, 6, 0 }} }, 2853 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 2854 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 2855 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 2856 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 2857 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 2858 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 2859 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 2860 {{ 1, false, 5, 0 }} }, 2861 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 2862 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 2863 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 2864 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 2865 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 2866 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 2867 { 2, false, 5, 0 }} }, 2868 { 
Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 2869 { 2, false, 6, 0 }} }, 2870 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 2871 { 3, false, 5, 0 }} }, 2872 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 2873 { 3, false, 6, 0 }} }, 2874 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 2875 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 2876 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 2877 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 2878 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 2879 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 2880 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 2881 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, 2882 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 2883 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 2884 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 2885 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 2886 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 2887 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 2888 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 2889 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 2890 {{ 2, false, 4, 0 }, 2891 { 3, false, 5, 0 }} }, 2892 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 2893 {{ 2, false, 4, 0 }, 2894 { 3, false, 5, 0 }} }, 2895 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 2896 {{ 2, false, 4, 0 }, 2897 { 3, false, 5, 0 }} }, 2898 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 2899 {{ 2, false, 4, 0 }, 2900 { 3, false, 5, 0 }} }, 2901 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 2902 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 2903 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 2904 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 2905 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 2906 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 2907 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 2908 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 2909 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 2910 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 2911 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 2912 { 2, false, 5, 0 }} }, 2913 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 2914 { 2, false, 6, 0 }} }, 2915 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, 2916 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 2917 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 2918 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 2919 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 2920 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 2921 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 2922 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 2923 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 2924 {{ 1, false, 4, 0 }} }, 2925 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, 
false, 4, 0 }} }, 2926 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 2927 {{ 1, false, 4, 0 }} }, 2928 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 2929 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 2930 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 2931 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 2932 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 2933 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 2934 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 2935 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 2936 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 2937 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 2938 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 2939 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 2940 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 2941 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 2942 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 2943 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, 2944 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 2945 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 2946 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 2947 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 2948 {{ 3, false, 1, 0 }} }, 2949 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 2950 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 2951 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 2952 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 2953 {{ 3, false, 1, 0 }} }, 2954 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 2955 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 2956 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 2957 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 2958 {{ 3, false, 1, 0 }} }, 2959 }; 2960 2961 // Use a dynamically initialized static to sort the table exactly once on 2962 // first run. 2963 static const bool SortOnce = 2964 (llvm::sort(Infos, 2965 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 2966 return LHS.BuiltinID < RHS.BuiltinID; 2967 }), 2968 true); 2969 (void)SortOnce; 2970 2971 const BuiltinInfo *F = llvm::partition_point( 2972 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 2973 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 2974 return false; 2975 2976 bool Error = false; 2977 2978 for (const ArgInfo &A : F->Infos) { 2979 // Ignore empty ArgInfo elements. 2980 if (A.BitWidth == 0) 2981 continue; 2982 2983 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 2984 int32_t Max = (1 << (A.IsSigned ? 
                        A.BitWidth - 1 : A.BitWidth)) - 1;
    if (!A.Align) {
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
    } else {
      unsigned M = 1 << A.Align;
      Min *= M;
      Max *= M;
      Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
               SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
    }
  }
  return Error;
}

bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}

bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks that the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction, i.e. df/m, df/n, and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics map to) that use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
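  // (The width of the df/m field tracks the element size: 3 bits for the .b
  // forms, 4 for .h, 5 for .w and 6 for .d, i.e. the ranges [0, 7], [0, 15],
  // [0, 31] and [0, 63] checked in the case groups that follow.)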
3057 case Mips::BI__builtin_msa_bclri_b: 3058 case Mips::BI__builtin_msa_bnegi_b: 3059 case Mips::BI__builtin_msa_bseti_b: 3060 case Mips::BI__builtin_msa_sat_s_b: 3061 case Mips::BI__builtin_msa_sat_u_b: 3062 case Mips::BI__builtin_msa_slli_b: 3063 case Mips::BI__builtin_msa_srai_b: 3064 case Mips::BI__builtin_msa_srari_b: 3065 case Mips::BI__builtin_msa_srli_b: 3066 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; 3067 case Mips::BI__builtin_msa_binsli_b: 3068 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; 3069 // These intrinsics take an unsigned 4 bit immediate. 3070 case Mips::BI__builtin_msa_bclri_h: 3071 case Mips::BI__builtin_msa_bnegi_h: 3072 case Mips::BI__builtin_msa_bseti_h: 3073 case Mips::BI__builtin_msa_sat_s_h: 3074 case Mips::BI__builtin_msa_sat_u_h: 3075 case Mips::BI__builtin_msa_slli_h: 3076 case Mips::BI__builtin_msa_srai_h: 3077 case Mips::BI__builtin_msa_srari_h: 3078 case Mips::BI__builtin_msa_srli_h: 3079 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; 3080 case Mips::BI__builtin_msa_binsli_h: 3081 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; 3082 // These intrinsics take an unsigned 5 bit immediate. 3083 // The first block of intrinsics actually have an unsigned 5 bit field, 3084 // not a df/n field. 3085 case Mips::BI__builtin_msa_cfcmsa: 3086 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; 3087 case Mips::BI__builtin_msa_clei_u_b: 3088 case Mips::BI__builtin_msa_clei_u_h: 3089 case Mips::BI__builtin_msa_clei_u_w: 3090 case Mips::BI__builtin_msa_clei_u_d: 3091 case Mips::BI__builtin_msa_clti_u_b: 3092 case Mips::BI__builtin_msa_clti_u_h: 3093 case Mips::BI__builtin_msa_clti_u_w: 3094 case Mips::BI__builtin_msa_clti_u_d: 3095 case Mips::BI__builtin_msa_maxi_u_b: 3096 case Mips::BI__builtin_msa_maxi_u_h: 3097 case Mips::BI__builtin_msa_maxi_u_w: 3098 case Mips::BI__builtin_msa_maxi_u_d: 3099 case Mips::BI__builtin_msa_mini_u_b: 3100 case Mips::BI__builtin_msa_mini_u_h: 3101 case Mips::BI__builtin_msa_mini_u_w: 3102 case Mips::BI__builtin_msa_mini_u_d: 3103 case Mips::BI__builtin_msa_addvi_b: 3104 case Mips::BI__builtin_msa_addvi_h: 3105 case Mips::BI__builtin_msa_addvi_w: 3106 case Mips::BI__builtin_msa_addvi_d: 3107 case Mips::BI__builtin_msa_bclri_w: 3108 case Mips::BI__builtin_msa_bnegi_w: 3109 case Mips::BI__builtin_msa_bseti_w: 3110 case Mips::BI__builtin_msa_sat_s_w: 3111 case Mips::BI__builtin_msa_sat_u_w: 3112 case Mips::BI__builtin_msa_slli_w: 3113 case Mips::BI__builtin_msa_srai_w: 3114 case Mips::BI__builtin_msa_srari_w: 3115 case Mips::BI__builtin_msa_srli_w: 3116 case Mips::BI__builtin_msa_srlri_w: 3117 case Mips::BI__builtin_msa_subvi_b: 3118 case Mips::BI__builtin_msa_subvi_h: 3119 case Mips::BI__builtin_msa_subvi_w: 3120 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; 3121 case Mips::BI__builtin_msa_binsli_w: 3122 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; 3123 // These intrinsics take an unsigned 6 bit immediate. 
3124 case Mips::BI__builtin_msa_bclri_d: 3125 case Mips::BI__builtin_msa_bnegi_d: 3126 case Mips::BI__builtin_msa_bseti_d: 3127 case Mips::BI__builtin_msa_sat_s_d: 3128 case Mips::BI__builtin_msa_sat_u_d: 3129 case Mips::BI__builtin_msa_slli_d: 3130 case Mips::BI__builtin_msa_srai_d: 3131 case Mips::BI__builtin_msa_srari_d: 3132 case Mips::BI__builtin_msa_srli_d: 3133 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; 3134 case Mips::BI__builtin_msa_binsli_d: 3135 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; 3136 // These intrinsics take a signed 5 bit immediate. 3137 case Mips::BI__builtin_msa_ceqi_b: 3138 case Mips::BI__builtin_msa_ceqi_h: 3139 case Mips::BI__builtin_msa_ceqi_w: 3140 case Mips::BI__builtin_msa_ceqi_d: 3141 case Mips::BI__builtin_msa_clti_s_b: 3142 case Mips::BI__builtin_msa_clti_s_h: 3143 case Mips::BI__builtin_msa_clti_s_w: 3144 case Mips::BI__builtin_msa_clti_s_d: 3145 case Mips::BI__builtin_msa_clei_s_b: 3146 case Mips::BI__builtin_msa_clei_s_h: 3147 case Mips::BI__builtin_msa_clei_s_w: 3148 case Mips::BI__builtin_msa_clei_s_d: 3149 case Mips::BI__builtin_msa_maxi_s_b: 3150 case Mips::BI__builtin_msa_maxi_s_h: 3151 case Mips::BI__builtin_msa_maxi_s_w: 3152 case Mips::BI__builtin_msa_maxi_s_d: 3153 case Mips::BI__builtin_msa_mini_s_b: 3154 case Mips::BI__builtin_msa_mini_s_h: 3155 case Mips::BI__builtin_msa_mini_s_w: 3156 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; 3157 // These intrinsics take an unsigned 8 bit immediate. 3158 case Mips::BI__builtin_msa_andi_b: 3159 case Mips::BI__builtin_msa_nori_b: 3160 case Mips::BI__builtin_msa_ori_b: 3161 case Mips::BI__builtin_msa_shf_b: 3162 case Mips::BI__builtin_msa_shf_h: 3163 case Mips::BI__builtin_msa_shf_w: 3164 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; 3165 case Mips::BI__builtin_msa_bseli_b: 3166 case Mips::BI__builtin_msa_bmnzi_b: 3167 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; 3168 // df/n format 3169 // These intrinsics take an unsigned 4 bit immediate. 3170 case Mips::BI__builtin_msa_copy_s_b: 3171 case Mips::BI__builtin_msa_copy_u_b: 3172 case Mips::BI__builtin_msa_insve_b: 3173 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; 3174 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; 3175 // These intrinsics take an unsigned 3 bit immediate. 3176 case Mips::BI__builtin_msa_copy_s_h: 3177 case Mips::BI__builtin_msa_copy_u_h: 3178 case Mips::BI__builtin_msa_insve_h: 3179 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; 3180 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; 3181 // These intrinsics take an unsigned 2 bit immediate. 3182 case Mips::BI__builtin_msa_copy_s_w: 3183 case Mips::BI__builtin_msa_copy_u_w: 3184 case Mips::BI__builtin_msa_insve_w: 3185 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; 3186 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; 3187 // These intrinsics take an unsigned 1 bit immediate. 3188 case Mips::BI__builtin_msa_copy_s_d: 3189 case Mips::BI__builtin_msa_copy_u_d: 3190 case Mips::BI__builtin_msa_insve_d: 3191 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; 3192 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; 3193 // Memory offsets and immediate loads. 3194 // These intrinsics take a signed 10 bit immediate. 
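  // (For the ld/st/ldr/str forms below, the offset must additionally be a
  // multiple of the element size; that constraint is carried in 'm' and
  // enforced by SemaBuiltinConstantArgMultiple after the switch.)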
3195 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; 3196 case Mips::BI__builtin_msa_ldi_h: 3197 case Mips::BI__builtin_msa_ldi_w: 3198 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; 3199 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; 3200 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; 3201 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; 3202 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; 3203 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; 3204 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; 3205 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; 3206 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; 3207 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; 3208 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; 3209 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; 3210 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; 3211 } 3212 3213 if (!m) 3214 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3215 3216 return SemaBuiltinConstantArgRange(TheCall, i, l, u) || 3217 SemaBuiltinConstantArgMultiple(TheCall, i, m); 3218 } 3219 3220 /// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str, 3221 /// advancing the pointer over the consumed characters. The decoded type is 3222 /// returned. If the decoded type represents a constant integer with a 3223 /// constraint on its value then Mask is set to that value. The type descriptors 3224 /// used in Str are specific to PPC MMA builtins and are documented in the file 3225 /// defining the PPC builtins. 3226 static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str, 3227 unsigned &Mask) { 3228 bool RequireICE = false; 3229 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 3230 switch (*Str++) { 3231 case 'V': 3232 return Context.getVectorType(Context.UnsignedCharTy, 16, 3233 VectorType::VectorKind::AltiVecVector); 3234 case 'i': { 3235 char *End; 3236 unsigned size = strtoul(Str, &End, 10); 3237 assert(End != Str && "Missing constant parameter constraint"); 3238 Str = End; 3239 Mask = size; 3240 return Context.IntTy; 3241 } 3242 case 'W': { 3243 char *End; 3244 unsigned size = strtoul(Str, &End, 10); 3245 assert(End != Str && "Missing PowerPC MMA type size"); 3246 Str = End; 3247 QualType Type; 3248 switch (size) { 3249 #define PPC_VECTOR_TYPE(typeName, Id, size) \ 3250 case size: Type = Context.Id##Ty; break; 3251 #include "clang/Basic/PPCTypes.def" 3252 default: llvm_unreachable("Invalid PowerPC MMA vector type"); 3253 } 3254 bool CheckVectorArgs = false; 3255 while (!CheckVectorArgs) { 3256 switch (*Str++) { 3257 case '*': 3258 Type = Context.getPointerType(Type); 3259 break; 3260 case 'C': 3261 Type = Type.withConst(); 3262 break; 3263 default: 3264 CheckVectorArgs = true; 3265 --Str; 3266 break; 3267 } 3268 } 3269 return Type; 3270 } 3271 default: 3272 return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true); 3273 } 3274 } 3275 3276 static bool isPPC_64Builtin(unsigned BuiltinID) { 3277 // These builtins only work on PPC 64bit targets. 
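  // CheckPPCBuiltinFunctionCall below rejects any of these with
  // err_64_bit_builtin_32_bit_tgt when the target is not 64-bit.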
3278 switch (BuiltinID) { 3279 case PPC::BI__builtin_divde: 3280 case PPC::BI__builtin_divdeu: 3281 case PPC::BI__builtin_bpermd: 3282 case PPC::BI__builtin_ppc_ldarx: 3283 case PPC::BI__builtin_ppc_stdcx: 3284 case PPC::BI__builtin_ppc_tdw: 3285 case PPC::BI__builtin_ppc_trapd: 3286 case PPC::BI__builtin_ppc_cmpeqb: 3287 case PPC::BI__builtin_ppc_setb: 3288 case PPC::BI__builtin_ppc_mulhd: 3289 case PPC::BI__builtin_ppc_mulhdu: 3290 case PPC::BI__builtin_ppc_maddhd: 3291 case PPC::BI__builtin_ppc_maddhdu: 3292 case PPC::BI__builtin_ppc_maddld: 3293 case PPC::BI__builtin_ppc_load8r: 3294 case PPC::BI__builtin_ppc_store8r: 3295 case PPC::BI__builtin_ppc_insert_exp: 3296 case PPC::BI__builtin_ppc_extract_sig: 3297 case PPC::BI__builtin_ppc_addex: 3298 return true; 3299 } 3300 return false; 3301 } 3302 3303 static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall, 3304 StringRef FeatureToCheck, unsigned DiagID, 3305 StringRef DiagArg = "") { 3306 if (S.Context.getTargetInfo().hasFeature(FeatureToCheck)) 3307 return false; 3308 3309 if (DiagArg.empty()) 3310 S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange(); 3311 else 3312 S.Diag(TheCall->getBeginLoc(), DiagID) 3313 << DiagArg << TheCall->getSourceRange(); 3314 3315 return true; 3316 } 3317 3318 /// Returns true if the argument consists of one contiguous run of 1s with any 3319 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so 3320 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not, 3321 /// since all 1s are not contiguous. 3322 bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) { 3323 llvm::APSInt Result; 3324 // We can't check the value of a dependent argument. 3325 Expr *Arg = TheCall->getArg(ArgNum); 3326 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3327 return false; 3328 3329 // Check constant-ness first. 3330 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3331 return true; 3332 3333 // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s. 
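  // If the set bits wrap around (as in 0xFF0000FF), the clear bits form one
  // contiguous run instead, so testing the complement catches that case.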
3334 if (Result.isShiftedMask() || (~Result).isShiftedMask()) 3335 return false; 3336 3337 return Diag(TheCall->getBeginLoc(), 3338 diag::err_argument_not_contiguous_bit_field) 3339 << ArgNum << Arg->getSourceRange(); 3340 } 3341 3342 bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 3343 CallExpr *TheCall) { 3344 unsigned i = 0, l = 0, u = 0; 3345 bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64; 3346 llvm::APSInt Result; 3347 3348 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit) 3349 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt) 3350 << TheCall->getSourceRange(); 3351 3352 switch (BuiltinID) { 3353 default: return false; 3354 case PPC::BI__builtin_altivec_crypto_vshasigmaw: 3355 case PPC::BI__builtin_altivec_crypto_vshasigmad: 3356 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || 3357 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3358 case PPC::BI__builtin_altivec_dss: 3359 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3); 3360 case PPC::BI__builtin_tbegin: 3361 case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; 3362 case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; 3363 case PPC::BI__builtin_tabortwc: 3364 case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; 3365 case PPC::BI__builtin_tabortwci: 3366 case PPC::BI__builtin_tabortdci: 3367 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 3368 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); 3369 case PPC::BI__builtin_altivec_dst: 3370 case PPC::BI__builtin_altivec_dstt: 3371 case PPC::BI__builtin_altivec_dstst: 3372 case PPC::BI__builtin_altivec_dststt: 3373 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 3374 case PPC::BI__builtin_vsx_xxpermdi: 3375 case PPC::BI__builtin_vsx_xxsldwi: 3376 return SemaBuiltinVSX(TheCall); 3377 case PPC::BI__builtin_divwe: 3378 case PPC::BI__builtin_divweu: 3379 case PPC::BI__builtin_divde: 3380 case PPC::BI__builtin_divdeu: 3381 return SemaFeatureCheck(*this, TheCall, "extdiv", 3382 diag::err_ppc_builtin_only_on_arch, "7"); 3383 case PPC::BI__builtin_bpermd: 3384 return SemaFeatureCheck(*this, TheCall, "bpermd", 3385 diag::err_ppc_builtin_only_on_arch, "7"); 3386 case PPC::BI__builtin_unpack_vector_int128: 3387 return SemaFeatureCheck(*this, TheCall, "vsx", 3388 diag::err_ppc_builtin_only_on_arch, "7") || 3389 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3390 case PPC::BI__builtin_pack_vector_int128: 3391 return SemaFeatureCheck(*this, TheCall, "vsx", 3392 diag::err_ppc_builtin_only_on_arch, "7"); 3393 case PPC::BI__builtin_altivec_vgnb: 3394 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 3395 case PPC::BI__builtin_altivec_vec_replace_elt: 3396 case PPC::BI__builtin_altivec_vec_replace_unaligned: { 3397 QualType VecTy = TheCall->getArg(0)->getType(); 3398 QualType EltTy = TheCall->getArg(1)->getType(); 3399 unsigned Width = Context.getIntWidth(EltTy); 3400 return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 
12 : 8) || 3401 !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy); 3402 } 3403 case PPC::BI__builtin_vsx_xxeval: 3404 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 3405 case PPC::BI__builtin_altivec_vsldbi: 3406 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3407 case PPC::BI__builtin_altivec_vsrdbi: 3408 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 3409 case PPC::BI__builtin_vsx_xxpermx: 3410 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 3411 case PPC::BI__builtin_ppc_tw: 3412 case PPC::BI__builtin_ppc_tdw: 3413 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 3414 case PPC::BI__builtin_ppc_cmpeqb: 3415 case PPC::BI__builtin_ppc_setb: 3416 case PPC::BI__builtin_ppc_maddhd: 3417 case PPC::BI__builtin_ppc_maddhdu: 3418 case PPC::BI__builtin_ppc_maddld: 3419 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3420 diag::err_ppc_builtin_only_on_arch, "9"); 3421 case PPC::BI__builtin_ppc_cmprb: 3422 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3423 diag::err_ppc_builtin_only_on_arch, "9") || 3424 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 3425 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 3426 // be a constant that represents a contiguous bit field. 3427 case PPC::BI__builtin_ppc_rlwnm: 3428 return SemaBuiltinConstantArg(TheCall, 1, Result) || 3429 SemaValueIsRunOfOnes(TheCall, 2); 3430 case PPC::BI__builtin_ppc_rlwimi: 3431 case PPC::BI__builtin_ppc_rldimi: 3432 return SemaBuiltinConstantArg(TheCall, 2, Result) || 3433 SemaValueIsRunOfOnes(TheCall, 3); 3434 case PPC::BI__builtin_ppc_extract_exp: 3435 case PPC::BI__builtin_ppc_extract_sig: 3436 case PPC::BI__builtin_ppc_insert_exp: 3437 return SemaFeatureCheck(*this, TheCall, "power9-vector", 3438 diag::err_ppc_builtin_only_on_arch, "9"); 3439 case PPC::BI__builtin_ppc_addex: { 3440 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 3441 diag::err_ppc_builtin_only_on_arch, "9") || 3442 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 3443 return true; 3444 // Output warning for reserved values 1 to 3. 
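    // SemaBuiltinConstantArgRange above has already constrained this argument
    // to a constant in [0, 3].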
    int ArgValue =
        TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
    if (ArgValue != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
          << ArgValue;
    return false;
  }
  case PPC::BI__builtin_ppc_mtfsb0:
  case PPC::BI__builtin_ppc_mtfsb1:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
  case PPC::BI__builtin_ppc_mtfsf:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
  case PPC::BI__builtin_ppc_mtfsfi:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case PPC::BI__builtin_ppc_alignx:
    return SemaBuiltinConstantArgPower2(TheCall, 0);
  case PPC::BI__builtin_ppc_rdlam:
    return SemaValueIsRunOfOnes(TheCall, 2);
  case PPC::BI__builtin_ppc_icbt:
  case PPC::BI__builtin_ppc_sthcx:
  case PPC::BI__builtin_ppc_stbcx:
  case PPC::BI__builtin_ppc_lharx:
  case PPC::BI__builtin_ppc_lbarx:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8");
  case PPC::BI__builtin_vsx_ldrmb:
  case PPC::BI__builtin_vsx_strmb:
    return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
                            diag::err_ppc_builtin_only_on_arch, "8") ||
           SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
  case PPC::BI__builtin_##Name: \
    return SemaBuiltinPPCMMACall(TheCall, Types);
#include "clang/Basic/BuiltinsPPC.def"
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

// Check if the given type is a non-pointer PPC MMA type. This function is used
// in Sema to prevent invalid uses of restricted PPC MMA types.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // Position of the memory order and scope arguments in the builtin.
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of the memory ordering as per C11 / C++11's memory model.
  // Only fence needs a check; atomic dec/inc allow all memory orders.
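  // (The switch below additionally rejects relaxed and consume for the fence
  // builtin.)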
3532 if (!llvm::isValidAtomicOrderingCABI(Ord)) 3533 return Diag(ArgExpr->getBeginLoc(), 3534 diag::warn_atomic_op_has_invalid_memory_order) 3535 << ArgExpr->getSourceRange(); 3536 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) { 3537 case llvm::AtomicOrderingCABI::relaxed: 3538 case llvm::AtomicOrderingCABI::consume: 3539 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence) 3540 return Diag(ArgExpr->getBeginLoc(), 3541 diag::warn_atomic_op_has_invalid_memory_order) 3542 << ArgExpr->getSourceRange(); 3543 break; 3544 case llvm::AtomicOrderingCABI::acquire: 3545 case llvm::AtomicOrderingCABI::release: 3546 case llvm::AtomicOrderingCABI::acq_rel: 3547 case llvm::AtomicOrderingCABI::seq_cst: 3548 break; 3549 } 3550 3551 Arg = TheCall->getArg(ScopeIndex); 3552 ArgExpr = Arg.get(); 3553 Expr::EvalResult ArgResult1; 3554 // Check that sync scope is a constant literal 3555 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context)) 3556 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal) 3557 << ArgExpr->getType(); 3558 3559 return false; 3560 } 3561 3562 bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) { 3563 llvm::APSInt Result; 3564 3565 // We can't check the value of a dependent argument. 3566 Expr *Arg = TheCall->getArg(ArgNum); 3567 if (Arg->isTypeDependent() || Arg->isValueDependent()) 3568 return false; 3569 3570 // Check constant-ness first. 3571 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 3572 return true; 3573 3574 int64_t Val = Result.getSExtValue(); 3575 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7)) 3576 return false; 3577 3578 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul) 3579 << Arg->getSourceRange(); 3580 } 3581 3582 bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, 3583 unsigned BuiltinID, 3584 CallExpr *TheCall) { 3585 // CodeGenFunction can also detect this, but this gives a better error 3586 // message. 3587 bool FeatureMissing = false; 3588 SmallVector<StringRef> ReqFeatures; 3589 StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID); 3590 Features.split(ReqFeatures, ','); 3591 3592 // Check if each required feature is included 3593 for (StringRef F : ReqFeatures) { 3594 if (TI.hasFeature(F)) 3595 continue; 3596 3597 // If the feature is 64bit, alter the string so it will print better in 3598 // the diagnostic. 3599 if (F == "64bit") 3600 F = "RV64"; 3601 3602 // Convert features like "zbr" and "experimental-zbr" to "Zbr". 
3603 F.consume_front("experimental-"); 3604 std::string FeatureStr = F.str(); 3605 FeatureStr[0] = std::toupper(FeatureStr[0]); 3606 3607 // Error message 3608 FeatureMissing = true; 3609 Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) 3610 << TheCall->getSourceRange() << StringRef(FeatureStr); 3611 } 3612 3613 if (FeatureMissing) 3614 return true; 3615 3616 switch (BuiltinID) { 3617 case RISCV::BI__builtin_rvv_vsetvli: 3618 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) || 3619 CheckRISCVLMUL(TheCall, 2); 3620 case RISCV::BI__builtin_rvv_vsetvlimax: 3621 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) || 3622 CheckRISCVLMUL(TheCall, 1); 3623 case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1: 3624 case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1: 3625 case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1: 3626 case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1: 3627 case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1: 3628 case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1: 3629 case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1: 3630 case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1: 3631 case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1: 3632 case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1: 3633 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2: 3634 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2: 3635 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2: 3636 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2: 3637 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2: 3638 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2: 3639 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2: 3640 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2: 3641 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2: 3642 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2: 3643 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4: 3644 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4: 3645 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4: 3646 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4: 3647 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4: 3648 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4: 3649 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4: 3650 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4: 3651 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4: 3652 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4: 3653 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3654 case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1: 3655 case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1: 3656 case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1: 3657 case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1: 3658 case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1: 3659 case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1: 3660 case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1: 3661 case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1: 3662 case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1: 3663 case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1: 3664 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2: 3665 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2: 3666 case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2: 3667 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2: 3668 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2: 3669 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2: 3670 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2: 3671 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2: 3672 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2: 3673 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2: 3674 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3675 case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1: 3676 case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1: 3677 case 
RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1: 3678 case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1: 3679 case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1: 3680 case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1: 3681 case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1: 3682 case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1: 3683 case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1: 3684 case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1: 3685 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 3686 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2: 3687 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2: 3688 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2: 3689 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2: 3690 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2: 3691 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2: 3692 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2: 3693 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2: 3694 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2: 3695 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2: 3696 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4: 3697 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4: 3698 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4: 3699 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4: 3700 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4: 3701 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4: 3702 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4: 3703 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4: 3704 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4: 3705 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4: 3706 case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8: 3707 case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8: 3708 case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8: 3709 case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8: 3710 case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8: 3711 case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8: 3712 case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8: 3713 case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8: 3714 case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8: 3715 case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8: 3716 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 3717 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4: 3718 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4: 3719 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4: 3720 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4: 3721 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4: 3722 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4: 3723 case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4: 3724 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4: 3725 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4: 3726 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4: 3727 case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8: 3728 case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8: 3729 case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8: 3730 case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8: 3731 case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8: 3732 case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8: 3733 case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8: 3734 case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8: 3735 case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8: 3736 case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8: 3737 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 3738 case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8: 3739 case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8: 3740 case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8: 3741 case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8: 3742 case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8: 3743 case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8: 3744 case 
RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8: 3745 case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8: 3746 case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8: 3747 case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8: 3748 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7); 3749 } 3750 3751 return false; 3752 } 3753 3754 bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, 3755 CallExpr *TheCall) { 3756 if (BuiltinID == SystemZ::BI__builtin_tabort) { 3757 Expr *Arg = TheCall->getArg(0); 3758 if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context)) 3759 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256) 3760 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code) 3761 << Arg->getSourceRange(); 3762 } 3763 3764 // For intrinsics which take an immediate value as part of the instruction, 3765 // range check them here. 3766 unsigned i = 0, l = 0, u = 0; 3767 switch (BuiltinID) { 3768 default: return false; 3769 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; 3770 case SystemZ::BI__builtin_s390_verimb: 3771 case SystemZ::BI__builtin_s390_verimh: 3772 case SystemZ::BI__builtin_s390_verimf: 3773 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; 3774 case SystemZ::BI__builtin_s390_vfaeb: 3775 case SystemZ::BI__builtin_s390_vfaeh: 3776 case SystemZ::BI__builtin_s390_vfaef: 3777 case SystemZ::BI__builtin_s390_vfaebs: 3778 case SystemZ::BI__builtin_s390_vfaehs: 3779 case SystemZ::BI__builtin_s390_vfaefs: 3780 case SystemZ::BI__builtin_s390_vfaezb: 3781 case SystemZ::BI__builtin_s390_vfaezh: 3782 case SystemZ::BI__builtin_s390_vfaezf: 3783 case SystemZ::BI__builtin_s390_vfaezbs: 3784 case SystemZ::BI__builtin_s390_vfaezhs: 3785 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; 3786 case SystemZ::BI__builtin_s390_vfisb: 3787 case SystemZ::BI__builtin_s390_vfidb: 3788 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || 3789 SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); 3790 case SystemZ::BI__builtin_s390_vftcisb: 3791 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; 3792 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; 3793 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; 3794 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; 3795 case SystemZ::BI__builtin_s390_vstrcb: 3796 case SystemZ::BI__builtin_s390_vstrch: 3797 case SystemZ::BI__builtin_s390_vstrcf: 3798 case SystemZ::BI__builtin_s390_vstrczb: 3799 case SystemZ::BI__builtin_s390_vstrczh: 3800 case SystemZ::BI__builtin_s390_vstrczf: 3801 case SystemZ::BI__builtin_s390_vstrcbs: 3802 case SystemZ::BI__builtin_s390_vstrchs: 3803 case SystemZ::BI__builtin_s390_vstrcfs: 3804 case SystemZ::BI__builtin_s390_vstrczbs: 3805 case SystemZ::BI__builtin_s390_vstrczhs: 3806 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; 3807 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; 3808 case SystemZ::BI__builtin_s390_vfminsb: 3809 case SystemZ::BI__builtin_s390_vfmaxsb: 3810 case SystemZ::BI__builtin_s390_vfmindb: 3811 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; 3812 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; 3813 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; 3814 case SystemZ::BI__builtin_s390_vclfnhs: 3815 case SystemZ::BI__builtin_s390_vclfnls: 3816 case SystemZ::BI__builtin_s390_vcfn: 3817 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; 3818 case 
SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; 3819 } 3820 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 3821 } 3822 3823 /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). 3824 /// This checks that the target supports __builtin_cpu_supports and 3825 /// that the string argument is constant and valid. 3826 static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI, 3827 CallExpr *TheCall) { 3828 Expr *Arg = TheCall->getArg(0); 3829 3830 // Check if the argument is a string literal. 3831 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3832 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3833 << Arg->getSourceRange(); 3834 3835 // Check the contents of the string. 3836 StringRef Feature = 3837 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3838 if (!TI.validateCpuSupports(Feature)) 3839 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports) 3840 << Arg->getSourceRange(); 3841 return false; 3842 } 3843 3844 /// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *). 3845 /// This checks that the target supports __builtin_cpu_is and 3846 /// that the string argument is constant and valid. 3847 static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 3848 Expr *Arg = TheCall->getArg(0); 3849 3850 // Check if the argument is a string literal. 3851 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 3852 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 3853 << Arg->getSourceRange(); 3854 3855 // Check the contents of the string. 3856 StringRef Feature = 3857 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 3858 if (!TI.validateCpuIs(Feature)) 3859 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 3860 << Arg->getSourceRange(); 3861 return false; 3862 } 3863 3864 // Check if the rounding mode is legal. 3865 bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 3866 // Indicates if this instruction has rounding control or just SAE. 
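  // (SAE here presumably stands for "suppress all exceptions"; when the
  // instruction also has rounding control, the immediate may carry a rounding
  // mode as well, which widens the set of values accepted below.)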
3867 bool HasRC = false; 3868 3869 unsigned ArgNum = 0; 3870 switch (BuiltinID) { 3871 default: 3872 return false; 3873 case X86::BI__builtin_ia32_vcvttsd2si32: 3874 case X86::BI__builtin_ia32_vcvttsd2si64: 3875 case X86::BI__builtin_ia32_vcvttsd2usi32: 3876 case X86::BI__builtin_ia32_vcvttsd2usi64: 3877 case X86::BI__builtin_ia32_vcvttss2si32: 3878 case X86::BI__builtin_ia32_vcvttss2si64: 3879 case X86::BI__builtin_ia32_vcvttss2usi32: 3880 case X86::BI__builtin_ia32_vcvttss2usi64: 3881 ArgNum = 1; 3882 break; 3883 case X86::BI__builtin_ia32_maxpd512: 3884 case X86::BI__builtin_ia32_maxps512: 3885 case X86::BI__builtin_ia32_minpd512: 3886 case X86::BI__builtin_ia32_minps512: 3887 case X86::BI__builtin_ia32_maxph512: 3888 case X86::BI__builtin_ia32_minph512: 3889 ArgNum = 2; 3890 break; 3891 case X86::BI__builtin_ia32_cvtps2pd512_mask: 3892 case X86::BI__builtin_ia32_cvttpd2dq512_mask: 3893 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 3894 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 3895 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 3896 case X86::BI__builtin_ia32_cvttps2dq512_mask: 3897 case X86::BI__builtin_ia32_cvttps2qq512_mask: 3898 case X86::BI__builtin_ia32_cvttps2udq512_mask: 3899 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 3900 case X86::BI__builtin_ia32_exp2pd_mask: 3901 case X86::BI__builtin_ia32_exp2ps_mask: 3902 case X86::BI__builtin_ia32_getexppd512_mask: 3903 case X86::BI__builtin_ia32_getexpps512_mask: 3904 case X86::BI__builtin_ia32_rcp28pd_mask: 3905 case X86::BI__builtin_ia32_rcp28ps_mask: 3906 case X86::BI__builtin_ia32_rsqrt28pd_mask: 3907 case X86::BI__builtin_ia32_rsqrt28ps_mask: 3908 case X86::BI__builtin_ia32_vcomisd: 3909 case X86::BI__builtin_ia32_vcomiss: 3910 case X86::BI__builtin_ia32_vcomish: 3911 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 3912 ArgNum = 3; 3913 break; 3914 case X86::BI__builtin_ia32_cmppd512_mask: 3915 case X86::BI__builtin_ia32_cmpps512_mask: 3916 case X86::BI__builtin_ia32_cmpsd_mask: 3917 case X86::BI__builtin_ia32_cmpss_mask: 3918 case X86::BI__builtin_ia32_cmpsh_mask: 3919 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 3920 case X86::BI__builtin_ia32_getexpsd128_round_mask: 3921 case X86::BI__builtin_ia32_getexpss128_round_mask: 3922 case X86::BI__builtin_ia32_getmantpd512_mask: 3923 case X86::BI__builtin_ia32_getmantps512_mask: 3924 case X86::BI__builtin_ia32_maxsd_round_mask: 3925 case X86::BI__builtin_ia32_maxss_round_mask: 3926 case X86::BI__builtin_ia32_maxsh_round_mask: 3927 case X86::BI__builtin_ia32_minsd_round_mask: 3928 case X86::BI__builtin_ia32_minss_round_mask: 3929 case X86::BI__builtin_ia32_minsh_round_mask: 3930 case X86::BI__builtin_ia32_rcp28sd_round_mask: 3931 case X86::BI__builtin_ia32_rcp28ss_round_mask: 3932 case X86::BI__builtin_ia32_reducepd512_mask: 3933 case X86::BI__builtin_ia32_reduceps512_mask: 3934 case X86::BI__builtin_ia32_rndscalepd_mask: 3935 case X86::BI__builtin_ia32_rndscaleps_mask: 3936 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 3937 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 3938 ArgNum = 4; 3939 break; 3940 case X86::BI__builtin_ia32_fixupimmpd512_mask: 3941 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 3942 case X86::BI__builtin_ia32_fixupimmps512_mask: 3943 case X86::BI__builtin_ia32_fixupimmps512_maskz: 3944 case X86::BI__builtin_ia32_fixupimmsd_mask: 3945 case X86::BI__builtin_ia32_fixupimmsd_maskz: 3946 case X86::BI__builtin_ia32_fixupimmss_mask: 3947 case X86::BI__builtin_ia32_fixupimmss_maskz: 3948 case X86::BI__builtin_ia32_getmantsd_round_mask: 3949 case 
X86::BI__builtin_ia32_getmantss_round_mask: 3950 case X86::BI__builtin_ia32_rangepd512_mask: 3951 case X86::BI__builtin_ia32_rangeps512_mask: 3952 case X86::BI__builtin_ia32_rangesd128_round_mask: 3953 case X86::BI__builtin_ia32_rangess128_round_mask: 3954 case X86::BI__builtin_ia32_reducesd_mask: 3955 case X86::BI__builtin_ia32_reducess_mask: 3956 case X86::BI__builtin_ia32_rndscalesd_round_mask: 3957 case X86::BI__builtin_ia32_rndscaless_round_mask: 3958 ArgNum = 5; 3959 break; 3960 case X86::BI__builtin_ia32_vcvtsd2si64: 3961 case X86::BI__builtin_ia32_vcvtsd2si32: 3962 case X86::BI__builtin_ia32_vcvtsd2usi32: 3963 case X86::BI__builtin_ia32_vcvtsd2usi64: 3964 case X86::BI__builtin_ia32_vcvtss2si32: 3965 case X86::BI__builtin_ia32_vcvtss2si64: 3966 case X86::BI__builtin_ia32_vcvtss2usi32: 3967 case X86::BI__builtin_ia32_vcvtss2usi64: 3968 case X86::BI__builtin_ia32_sqrtpd512: 3969 case X86::BI__builtin_ia32_sqrtps512: 3970 ArgNum = 1; 3971 HasRC = true; 3972 break; 3973 case X86::BI__builtin_ia32_addph512: 3974 case X86::BI__builtin_ia32_divph512: 3975 case X86::BI__builtin_ia32_mulph512: 3976 case X86::BI__builtin_ia32_subph512: 3977 case X86::BI__builtin_ia32_addpd512: 3978 case X86::BI__builtin_ia32_addps512: 3979 case X86::BI__builtin_ia32_divpd512: 3980 case X86::BI__builtin_ia32_divps512: 3981 case X86::BI__builtin_ia32_mulpd512: 3982 case X86::BI__builtin_ia32_mulps512: 3983 case X86::BI__builtin_ia32_subpd512: 3984 case X86::BI__builtin_ia32_subps512: 3985 case X86::BI__builtin_ia32_cvtsi2sd64: 3986 case X86::BI__builtin_ia32_cvtsi2ss32: 3987 case X86::BI__builtin_ia32_cvtsi2ss64: 3988 case X86::BI__builtin_ia32_cvtusi2sd64: 3989 case X86::BI__builtin_ia32_cvtusi2ss32: 3990 case X86::BI__builtin_ia32_cvtusi2ss64: 3991 ArgNum = 2; 3992 HasRC = true; 3993 break; 3994 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 3995 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 3996 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 3997 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 3998 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 3999 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4000 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4001 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4002 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4003 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4004 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4005 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4006 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4007 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4008 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4009 ArgNum = 3; 4010 HasRC = true; 4011 break; 4012 case X86::BI__builtin_ia32_addsh_round_mask: 4013 case X86::BI__builtin_ia32_addss_round_mask: 4014 case X86::BI__builtin_ia32_addsd_round_mask: 4015 case X86::BI__builtin_ia32_divsh_round_mask: 4016 case X86::BI__builtin_ia32_divss_round_mask: 4017 case X86::BI__builtin_ia32_divsd_round_mask: 4018 case X86::BI__builtin_ia32_mulsh_round_mask: 4019 case X86::BI__builtin_ia32_mulss_round_mask: 4020 case X86::BI__builtin_ia32_mulsd_round_mask: 4021 case X86::BI__builtin_ia32_subsh_round_mask: 4022 case X86::BI__builtin_ia32_subss_round_mask: 4023 case X86::BI__builtin_ia32_subsd_round_mask: 4024 case X86::BI__builtin_ia32_scalefpd512_mask: 4025 case X86::BI__builtin_ia32_scalefps512_mask: 4026 case X86::BI__builtin_ia32_scalefsd_round_mask: 4027 case X86::BI__builtin_ia32_scalefss_round_mask: 4028 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4029 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4030 case 
X86::BI__builtin_ia32_sqrtss_round_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_mask:
  case X86::BI__builtin_ia32_vfmaddsd3_maskz:
  case X86::BI__builtin_ia32_vfmaddsd3_mask3:
  case X86::BI__builtin_ia32_vfmaddss3_mask:
  case X86::BI__builtin_ia32_vfmaddss3_maskz:
  case X86::BI__builtin_ia32_vfmaddss3_mask3:
  case X86::BI__builtin_ia32_vfmaddpd512_mask:
  case X86::BI__builtin_ia32_vfmaddpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddps512_mask:
  case X86::BI__builtin_ia32_vfmaddps512_maskz:
  case X86::BI__builtin_ia32_vfmaddps512_mask3:
  case X86::BI__builtin_ia32_vfmsubps512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
  case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask:
  case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
  case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
  case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
    ArgNum = 4;
    HasRC = true;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
  // make sure it's only combined with ROUND_NO_EXC. If the intrinsic does not
  // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
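// The scale immediate must be exactly 1, 2, 4, or 8; any other constant is
// diagnosed with err_x86_builtin_invalid_scale.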
4084 bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, 4085 CallExpr *TheCall) { 4086 unsigned ArgNum = 0; 4087 switch (BuiltinID) { 4088 default: 4089 return false; 4090 case X86::BI__builtin_ia32_gatherpfdpd: 4091 case X86::BI__builtin_ia32_gatherpfdps: 4092 case X86::BI__builtin_ia32_gatherpfqpd: 4093 case X86::BI__builtin_ia32_gatherpfqps: 4094 case X86::BI__builtin_ia32_scatterpfdpd: 4095 case X86::BI__builtin_ia32_scatterpfdps: 4096 case X86::BI__builtin_ia32_scatterpfqpd: 4097 case X86::BI__builtin_ia32_scatterpfqps: 4098 ArgNum = 3; 4099 break; 4100 case X86::BI__builtin_ia32_gatherd_pd: 4101 case X86::BI__builtin_ia32_gatherd_pd256: 4102 case X86::BI__builtin_ia32_gatherq_pd: 4103 case X86::BI__builtin_ia32_gatherq_pd256: 4104 case X86::BI__builtin_ia32_gatherd_ps: 4105 case X86::BI__builtin_ia32_gatherd_ps256: 4106 case X86::BI__builtin_ia32_gatherq_ps: 4107 case X86::BI__builtin_ia32_gatherq_ps256: 4108 case X86::BI__builtin_ia32_gatherd_q: 4109 case X86::BI__builtin_ia32_gatherd_q256: 4110 case X86::BI__builtin_ia32_gatherq_q: 4111 case X86::BI__builtin_ia32_gatherq_q256: 4112 case X86::BI__builtin_ia32_gatherd_d: 4113 case X86::BI__builtin_ia32_gatherd_d256: 4114 case X86::BI__builtin_ia32_gatherq_d: 4115 case X86::BI__builtin_ia32_gatherq_d256: 4116 case X86::BI__builtin_ia32_gather3div2df: 4117 case X86::BI__builtin_ia32_gather3div2di: 4118 case X86::BI__builtin_ia32_gather3div4df: 4119 case X86::BI__builtin_ia32_gather3div4di: 4120 case X86::BI__builtin_ia32_gather3div4sf: 4121 case X86::BI__builtin_ia32_gather3div4si: 4122 case X86::BI__builtin_ia32_gather3div8sf: 4123 case X86::BI__builtin_ia32_gather3div8si: 4124 case X86::BI__builtin_ia32_gather3siv2df: 4125 case X86::BI__builtin_ia32_gather3siv2di: 4126 case X86::BI__builtin_ia32_gather3siv4df: 4127 case X86::BI__builtin_ia32_gather3siv4di: 4128 case X86::BI__builtin_ia32_gather3siv4sf: 4129 case X86::BI__builtin_ia32_gather3siv4si: 4130 case X86::BI__builtin_ia32_gather3siv8sf: 4131 case X86::BI__builtin_ia32_gather3siv8si: 4132 case X86::BI__builtin_ia32_gathersiv8df: 4133 case X86::BI__builtin_ia32_gathersiv16sf: 4134 case X86::BI__builtin_ia32_gatherdiv8df: 4135 case X86::BI__builtin_ia32_gatherdiv16sf: 4136 case X86::BI__builtin_ia32_gathersiv8di: 4137 case X86::BI__builtin_ia32_gathersiv16si: 4138 case X86::BI__builtin_ia32_gatherdiv8di: 4139 case X86::BI__builtin_ia32_gatherdiv16si: 4140 case X86::BI__builtin_ia32_scatterdiv2df: 4141 case X86::BI__builtin_ia32_scatterdiv2di: 4142 case X86::BI__builtin_ia32_scatterdiv4df: 4143 case X86::BI__builtin_ia32_scatterdiv4di: 4144 case X86::BI__builtin_ia32_scatterdiv4sf: 4145 case X86::BI__builtin_ia32_scatterdiv4si: 4146 case X86::BI__builtin_ia32_scatterdiv8sf: 4147 case X86::BI__builtin_ia32_scatterdiv8si: 4148 case X86::BI__builtin_ia32_scattersiv2df: 4149 case X86::BI__builtin_ia32_scattersiv2di: 4150 case X86::BI__builtin_ia32_scattersiv4df: 4151 case X86::BI__builtin_ia32_scattersiv4di: 4152 case X86::BI__builtin_ia32_scattersiv4sf: 4153 case X86::BI__builtin_ia32_scattersiv4si: 4154 case X86::BI__builtin_ia32_scattersiv8sf: 4155 case X86::BI__builtin_ia32_scattersiv8si: 4156 case X86::BI__builtin_ia32_scattersiv8df: 4157 case X86::BI__builtin_ia32_scattersiv16sf: 4158 case X86::BI__builtin_ia32_scatterdiv8df: 4159 case X86::BI__builtin_ia32_scatterdiv16sf: 4160 case X86::BI__builtin_ia32_scattersiv8di: 4161 case X86::BI__builtin_ia32_scattersiv16si: 4162 case X86::BI__builtin_ia32_scatterdiv8di: 4163 case X86::BI__builtin_ia32_scatterdiv16si: 4164 
    ArgNum = 4;
    break;
  }

  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
         << Arg->getSourceRange();
}

enum { TileRegLow = 0, TileRegHigh = 7 };

bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
                                             ArrayRef<int> ArgNums) {
  for (int ArgNum : ArgNums) {
    if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
      return true;
  }
  return false;
}

bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
                                        ArrayRef<int> ArgNums) {
  // The maximum number of tile registers is TileRegHigh + 1, so use one bit
  // per register in the bitset to track which registers have already been
  // used.
  std::bitset<TileRegHigh + 1> ArgValues;
  for (int ArgNum : ArgNums) {
    Expr *Arg = TheCall->getArg(ArgNum);
    if (Arg->isTypeDependent() || Arg->isValueDependent())
      continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
      return true;
    int ArgExtValue = Result.getExtValue();
    assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
           "Incorrect tile register num.");
    if (ArgValues.test(ArgExtValue))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_x86_builtin_tile_arg_duplicate)
             << TheCall->getArg(ArgNum)->getSourceRange();
    ArgValues.set(ArgExtValue);
  }
  return false;
}

bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
                                                ArrayRef<int> ArgNums) {
  return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
         CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
}

bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_tileloadd64:
  case X86::BI__builtin_ia32_tileloaddt164:
  case X86::BI__builtin_ia32_tilestored64:
  case X86::BI__builtin_ia32_tilezero:
    return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
  case X86::BI__builtin_ia32_tdpbssd:
  case X86::BI__builtin_ia32_tdpbsud:
  case X86::BI__builtin_ia32_tdpbusd:
  case X86::BI__builtin_ia32_tdpbuud:
  case X86::BI__builtin_ia32_tdpbf16ps:
    return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
  }
}

static bool isX86_32Builtin(unsigned BuiltinID) {
  // These builtins only work on x86-32 targets.
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
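  // (Currently just the read/write-eflags builtins listed in isX86_32Builtin.)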
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure it's valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
case X86::BI__builtin_ia32_inserti32x4: 4349 i = 2; l = 0; u = 3; 4350 break; 4351 case X86::BI__builtin_ia32_vpermil2pd: 4352 case X86::BI__builtin_ia32_vpermil2pd256: 4353 case X86::BI__builtin_ia32_vpermil2ps: 4354 case X86::BI__builtin_ia32_vpermil2ps256: 4355 i = 3; l = 0; u = 3; 4356 break; 4357 case X86::BI__builtin_ia32_cmpb128_mask: 4358 case X86::BI__builtin_ia32_cmpw128_mask: 4359 case X86::BI__builtin_ia32_cmpd128_mask: 4360 case X86::BI__builtin_ia32_cmpq128_mask: 4361 case X86::BI__builtin_ia32_cmpb256_mask: 4362 case X86::BI__builtin_ia32_cmpw256_mask: 4363 case X86::BI__builtin_ia32_cmpd256_mask: 4364 case X86::BI__builtin_ia32_cmpq256_mask: 4365 case X86::BI__builtin_ia32_cmpb512_mask: 4366 case X86::BI__builtin_ia32_cmpw512_mask: 4367 case X86::BI__builtin_ia32_cmpd512_mask: 4368 case X86::BI__builtin_ia32_cmpq512_mask: 4369 case X86::BI__builtin_ia32_ucmpb128_mask: 4370 case X86::BI__builtin_ia32_ucmpw128_mask: 4371 case X86::BI__builtin_ia32_ucmpd128_mask: 4372 case X86::BI__builtin_ia32_ucmpq128_mask: 4373 case X86::BI__builtin_ia32_ucmpb256_mask: 4374 case X86::BI__builtin_ia32_ucmpw256_mask: 4375 case X86::BI__builtin_ia32_ucmpd256_mask: 4376 case X86::BI__builtin_ia32_ucmpq256_mask: 4377 case X86::BI__builtin_ia32_ucmpb512_mask: 4378 case X86::BI__builtin_ia32_ucmpw512_mask: 4379 case X86::BI__builtin_ia32_ucmpd512_mask: 4380 case X86::BI__builtin_ia32_ucmpq512_mask: 4381 case X86::BI__builtin_ia32_vpcomub: 4382 case X86::BI__builtin_ia32_vpcomuw: 4383 case X86::BI__builtin_ia32_vpcomud: 4384 case X86::BI__builtin_ia32_vpcomuq: 4385 case X86::BI__builtin_ia32_vpcomb: 4386 case X86::BI__builtin_ia32_vpcomw: 4387 case X86::BI__builtin_ia32_vpcomd: 4388 case X86::BI__builtin_ia32_vpcomq: 4389 case X86::BI__builtin_ia32_vec_set_v8hi: 4390 case X86::BI__builtin_ia32_vec_set_v8si: 4391 i = 2; l = 0; u = 7; 4392 break; 4393 case X86::BI__builtin_ia32_vpermilpd256: 4394 case X86::BI__builtin_ia32_roundps: 4395 case X86::BI__builtin_ia32_roundpd: 4396 case X86::BI__builtin_ia32_roundps256: 4397 case X86::BI__builtin_ia32_roundpd256: 4398 case X86::BI__builtin_ia32_getmantpd128_mask: 4399 case X86::BI__builtin_ia32_getmantpd256_mask: 4400 case X86::BI__builtin_ia32_getmantps128_mask: 4401 case X86::BI__builtin_ia32_getmantps256_mask: 4402 case X86::BI__builtin_ia32_getmantpd512_mask: 4403 case X86::BI__builtin_ia32_getmantps512_mask: 4404 case X86::BI__builtin_ia32_vec_ext_v16qi: 4405 case X86::BI__builtin_ia32_vec_ext_v16hi: 4406 i = 1; l = 0; u = 15; 4407 break; 4408 case X86::BI__builtin_ia32_pblendd128: 4409 case X86::BI__builtin_ia32_blendps: 4410 case X86::BI__builtin_ia32_blendpd256: 4411 case X86::BI__builtin_ia32_shufpd256: 4412 case X86::BI__builtin_ia32_roundss: 4413 case X86::BI__builtin_ia32_roundsd: 4414 case X86::BI__builtin_ia32_rangepd128_mask: 4415 case X86::BI__builtin_ia32_rangepd256_mask: 4416 case X86::BI__builtin_ia32_rangepd512_mask: 4417 case X86::BI__builtin_ia32_rangeps128_mask: 4418 case X86::BI__builtin_ia32_rangeps256_mask: 4419 case X86::BI__builtin_ia32_rangeps512_mask: 4420 case X86::BI__builtin_ia32_getmantsd_round_mask: 4421 case X86::BI__builtin_ia32_getmantss_round_mask: 4422 case X86::BI__builtin_ia32_vec_set_v16qi: 4423 case X86::BI__builtin_ia32_vec_set_v16hi: 4424 i = 2; l = 0; u = 15; 4425 break; 4426 case X86::BI__builtin_ia32_vec_ext_v32qi: 4427 i = 1; l = 0; u = 31; 4428 break; 4429 case X86::BI__builtin_ia32_cmpps: 4430 case X86::BI__builtin_ia32_cmpss: 4431 case X86::BI__builtin_ia32_cmppd: 4432 case X86::BI__builtin_ia32_cmpsd: 
4433 case X86::BI__builtin_ia32_cmpps256: 4434 case X86::BI__builtin_ia32_cmppd256: 4435 case X86::BI__builtin_ia32_cmpps128_mask: 4436 case X86::BI__builtin_ia32_cmppd128_mask: 4437 case X86::BI__builtin_ia32_cmpps256_mask: 4438 case X86::BI__builtin_ia32_cmppd256_mask: 4439 case X86::BI__builtin_ia32_cmpps512_mask: 4440 case X86::BI__builtin_ia32_cmppd512_mask: 4441 case X86::BI__builtin_ia32_cmpsd_mask: 4442 case X86::BI__builtin_ia32_cmpss_mask: 4443 case X86::BI__builtin_ia32_vec_set_v32qi: 4444 i = 2; l = 0; u = 31; 4445 break; 4446 case X86::BI__builtin_ia32_permdf256: 4447 case X86::BI__builtin_ia32_permdi256: 4448 case X86::BI__builtin_ia32_permdf512: 4449 case X86::BI__builtin_ia32_permdi512: 4450 case X86::BI__builtin_ia32_vpermilps: 4451 case X86::BI__builtin_ia32_vpermilps256: 4452 case X86::BI__builtin_ia32_vpermilpd512: 4453 case X86::BI__builtin_ia32_vpermilps512: 4454 case X86::BI__builtin_ia32_pshufd: 4455 case X86::BI__builtin_ia32_pshufd256: 4456 case X86::BI__builtin_ia32_pshufd512: 4457 case X86::BI__builtin_ia32_pshufhw: 4458 case X86::BI__builtin_ia32_pshufhw256: 4459 case X86::BI__builtin_ia32_pshufhw512: 4460 case X86::BI__builtin_ia32_pshuflw: 4461 case X86::BI__builtin_ia32_pshuflw256: 4462 case X86::BI__builtin_ia32_pshuflw512: 4463 case X86::BI__builtin_ia32_vcvtps2ph: 4464 case X86::BI__builtin_ia32_vcvtps2ph_mask: 4465 case X86::BI__builtin_ia32_vcvtps2ph256: 4466 case X86::BI__builtin_ia32_vcvtps2ph256_mask: 4467 case X86::BI__builtin_ia32_vcvtps2ph512_mask: 4468 case X86::BI__builtin_ia32_rndscaleps_128_mask: 4469 case X86::BI__builtin_ia32_rndscalepd_128_mask: 4470 case X86::BI__builtin_ia32_rndscaleps_256_mask: 4471 case X86::BI__builtin_ia32_rndscalepd_256_mask: 4472 case X86::BI__builtin_ia32_rndscaleps_mask: 4473 case X86::BI__builtin_ia32_rndscalepd_mask: 4474 case X86::BI__builtin_ia32_reducepd128_mask: 4475 case X86::BI__builtin_ia32_reducepd256_mask: 4476 case X86::BI__builtin_ia32_reducepd512_mask: 4477 case X86::BI__builtin_ia32_reduceps128_mask: 4478 case X86::BI__builtin_ia32_reduceps256_mask: 4479 case X86::BI__builtin_ia32_reduceps512_mask: 4480 case X86::BI__builtin_ia32_prold512: 4481 case X86::BI__builtin_ia32_prolq512: 4482 case X86::BI__builtin_ia32_prold128: 4483 case X86::BI__builtin_ia32_prold256: 4484 case X86::BI__builtin_ia32_prolq128: 4485 case X86::BI__builtin_ia32_prolq256: 4486 case X86::BI__builtin_ia32_prord512: 4487 case X86::BI__builtin_ia32_prorq512: 4488 case X86::BI__builtin_ia32_prord128: 4489 case X86::BI__builtin_ia32_prord256: 4490 case X86::BI__builtin_ia32_prorq128: 4491 case X86::BI__builtin_ia32_prorq256: 4492 case X86::BI__builtin_ia32_fpclasspd128_mask: 4493 case X86::BI__builtin_ia32_fpclasspd256_mask: 4494 case X86::BI__builtin_ia32_fpclassps128_mask: 4495 case X86::BI__builtin_ia32_fpclassps256_mask: 4496 case X86::BI__builtin_ia32_fpclassps512_mask: 4497 case X86::BI__builtin_ia32_fpclasspd512_mask: 4498 case X86::BI__builtin_ia32_fpclasssd_mask: 4499 case X86::BI__builtin_ia32_fpclassss_mask: 4500 case X86::BI__builtin_ia32_pslldqi128_byteshift: 4501 case X86::BI__builtin_ia32_pslldqi256_byteshift: 4502 case X86::BI__builtin_ia32_pslldqi512_byteshift: 4503 case X86::BI__builtin_ia32_psrldqi128_byteshift: 4504 case X86::BI__builtin_ia32_psrldqi256_byteshift: 4505 case X86::BI__builtin_ia32_psrldqi512_byteshift: 4506 case X86::BI__builtin_ia32_kshiftliqi: 4507 case X86::BI__builtin_ia32_kshiftlihi: 4508 case X86::BI__builtin_ia32_kshiftlisi: 4509 case X86::BI__builtin_ia32_kshiftlidi: 4510 case 
X86::BI__builtin_ia32_kshiftriqi: 4511 case X86::BI__builtin_ia32_kshiftrihi: 4512 case X86::BI__builtin_ia32_kshiftrisi: 4513 case X86::BI__builtin_ia32_kshiftridi: 4514 i = 1; l = 0; u = 255; 4515 break; 4516 case X86::BI__builtin_ia32_vperm2f128_pd256: 4517 case X86::BI__builtin_ia32_vperm2f128_ps256: 4518 case X86::BI__builtin_ia32_vperm2f128_si256: 4519 case X86::BI__builtin_ia32_permti256: 4520 case X86::BI__builtin_ia32_pblendw128: 4521 case X86::BI__builtin_ia32_pblendw256: 4522 case X86::BI__builtin_ia32_blendps256: 4523 case X86::BI__builtin_ia32_pblendd256: 4524 case X86::BI__builtin_ia32_palignr128: 4525 case X86::BI__builtin_ia32_palignr256: 4526 case X86::BI__builtin_ia32_palignr512: 4527 case X86::BI__builtin_ia32_alignq512: 4528 case X86::BI__builtin_ia32_alignd512: 4529 case X86::BI__builtin_ia32_alignd128: 4530 case X86::BI__builtin_ia32_alignd256: 4531 case X86::BI__builtin_ia32_alignq128: 4532 case X86::BI__builtin_ia32_alignq256: 4533 case X86::BI__builtin_ia32_vcomisd: 4534 case X86::BI__builtin_ia32_vcomiss: 4535 case X86::BI__builtin_ia32_shuf_f32x4: 4536 case X86::BI__builtin_ia32_shuf_f64x2: 4537 case X86::BI__builtin_ia32_shuf_i32x4: 4538 case X86::BI__builtin_ia32_shuf_i64x2: 4539 case X86::BI__builtin_ia32_shufpd512: 4540 case X86::BI__builtin_ia32_shufps: 4541 case X86::BI__builtin_ia32_shufps256: 4542 case X86::BI__builtin_ia32_shufps512: 4543 case X86::BI__builtin_ia32_dbpsadbw128: 4544 case X86::BI__builtin_ia32_dbpsadbw256: 4545 case X86::BI__builtin_ia32_dbpsadbw512: 4546 case X86::BI__builtin_ia32_vpshldd128: 4547 case X86::BI__builtin_ia32_vpshldd256: 4548 case X86::BI__builtin_ia32_vpshldd512: 4549 case X86::BI__builtin_ia32_vpshldq128: 4550 case X86::BI__builtin_ia32_vpshldq256: 4551 case X86::BI__builtin_ia32_vpshldq512: 4552 case X86::BI__builtin_ia32_vpshldw128: 4553 case X86::BI__builtin_ia32_vpshldw256: 4554 case X86::BI__builtin_ia32_vpshldw512: 4555 case X86::BI__builtin_ia32_vpshrdd128: 4556 case X86::BI__builtin_ia32_vpshrdd256: 4557 case X86::BI__builtin_ia32_vpshrdd512: 4558 case X86::BI__builtin_ia32_vpshrdq128: 4559 case X86::BI__builtin_ia32_vpshrdq256: 4560 case X86::BI__builtin_ia32_vpshrdq512: 4561 case X86::BI__builtin_ia32_vpshrdw128: 4562 case X86::BI__builtin_ia32_vpshrdw256: 4563 case X86::BI__builtin_ia32_vpshrdw512: 4564 i = 2; l = 0; u = 255; 4565 break; 4566 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4567 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4568 case X86::BI__builtin_ia32_fixupimmps512_mask: 4569 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4570 case X86::BI__builtin_ia32_fixupimmsd_mask: 4571 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4572 case X86::BI__builtin_ia32_fixupimmss_mask: 4573 case X86::BI__builtin_ia32_fixupimmss_maskz: 4574 case X86::BI__builtin_ia32_fixupimmpd128_mask: 4575 case X86::BI__builtin_ia32_fixupimmpd128_maskz: 4576 case X86::BI__builtin_ia32_fixupimmpd256_mask: 4577 case X86::BI__builtin_ia32_fixupimmpd256_maskz: 4578 case X86::BI__builtin_ia32_fixupimmps128_mask: 4579 case X86::BI__builtin_ia32_fixupimmps128_maskz: 4580 case X86::BI__builtin_ia32_fixupimmps256_mask: 4581 case X86::BI__builtin_ia32_fixupimmps256_maskz: 4582 case X86::BI__builtin_ia32_pternlogd512_mask: 4583 case X86::BI__builtin_ia32_pternlogd512_maskz: 4584 case X86::BI__builtin_ia32_pternlogq512_mask: 4585 case X86::BI__builtin_ia32_pternlogq512_maskz: 4586 case X86::BI__builtin_ia32_pternlogd128_mask: 4587 case X86::BI__builtin_ia32_pternlogd128_maskz: 4588 case X86::BI__builtin_ia32_pternlogd256_mask: 4589 
case X86::BI__builtin_ia32_pternlogd256_maskz: 4590 case X86::BI__builtin_ia32_pternlogq128_mask: 4591 case X86::BI__builtin_ia32_pternlogq128_maskz: 4592 case X86::BI__builtin_ia32_pternlogq256_mask: 4593 case X86::BI__builtin_ia32_pternlogq256_maskz: 4594 i = 3; l = 0; u = 255; 4595 break; 4596 case X86::BI__builtin_ia32_gatherpfdpd: 4597 case X86::BI__builtin_ia32_gatherpfdps: 4598 case X86::BI__builtin_ia32_gatherpfqpd: 4599 case X86::BI__builtin_ia32_gatherpfqps: 4600 case X86::BI__builtin_ia32_scatterpfdpd: 4601 case X86::BI__builtin_ia32_scatterpfdps: 4602 case X86::BI__builtin_ia32_scatterpfqpd: 4603 case X86::BI__builtin_ia32_scatterpfqps: 4604 i = 4; l = 2; u = 3; 4605 break; 4606 case X86::BI__builtin_ia32_reducesd_mask: 4607 case X86::BI__builtin_ia32_reducess_mask: 4608 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4609 case X86::BI__builtin_ia32_rndscaless_round_mask: 4610 i = 4; l = 0; u = 255; 4611 break; 4612 } 4613 4614 // Note that we don't force a hard error on the range check here, allowing 4615 // template-generated or macro-generated dead code to potentially have out-of- 4616 // range values. Such code still needs to code generate, but it doesn't have 4617 // to make any sense. We use a warning that defaults to an error. 4618 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false); 4619 } 4620 4621 /// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo 4622 /// parameter with the FormatAttr's correct format_idx and firstDataArg. 4623 /// Returns true when the format fits the function and the FormatStringInfo has 4624 /// been populated. 4625 bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 4626 FormatStringInfo *FSI) { 4627 FSI->HasVAListArg = Format->getFirstArg() == 0; 4628 FSI->FormatIdx = Format->getFormatIdx() - 1; 4629 FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1; 4630 4631 // The way the format attribute works in GCC, the implicit this argument 4632 // of member functions is counted. However, it doesn't appear in our own 4633 // lists, so decrement format_idx in that case. 4634 if (IsCXXMember) { 4635 if (FSI->FormatIdx == 0) 4636 return false; 4637 --FSI->FormatIdx; 4638 if (FSI->FirstDataArg != 0) 4639 --FSI->FirstDataArg; 4640 } 4641 return true; 4642 } 4643 4644 /// Checks if the given expression evaluates to null. 4645 /// 4646 /// Returns true if the value evaluates to null. 4647 static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 4648 // If the expression has non-null type, it doesn't evaluate to null. 4649 if (auto nullability 4650 = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { 4651 if (*nullability == NullabilityKind::NonNull) 4652 return false; 4653 } 4654 4655 // As a special case, transparent unions initialized with zero are 4656 // considered null for the purposes of the nonnull attribute.
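// Illustrative sketch (hypothetical declarations, not from this file): given
//   typedef union { int *p; } U __attribute__((transparent_union));
// an argument written as the compound literal (U){0} is unwrapped below, and
// its first initializer is what gets evaluated for null-ness.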
4657 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 4658 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 4659 if (const CompoundLiteralExpr *CLE = 4660 dyn_cast<CompoundLiteralExpr>(Expr)) 4661 if (const InitListExpr *ILE = 4662 dyn_cast<InitListExpr>(CLE->getInitializer())) 4663 Expr = ILE->getInit(0); 4664 } 4665 4666 bool Result; 4667 return (!Expr->isValueDependent() && 4668 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 4669 !Result); 4670 } 4671 4672 static void CheckNonNullArgument(Sema &S, 4673 const Expr *ArgExpr, 4674 SourceLocation CallSiteLoc) { 4675 if (CheckNonNullExpr(S, ArgExpr)) 4676 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 4677 S.PDiag(diag::warn_null_arg) 4678 << ArgExpr->getSourceRange()); 4679 } 4680 4681 bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 4682 FormatStringInfo FSI; 4683 if ((GetFormatStringType(Format) == FST_NSString) && 4684 getFormatStringInfo(Format, false, &FSI)) { 4685 Idx = FSI.FormatIdx; 4686 return true; 4687 } 4688 return false; 4689 } 4690 4691 /// Diagnose use of %s directive in an NSString which is being passed 4692 /// as a formatting string to a formatting method. 4693 static void 4694 DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, 4695 const NamedDecl *FDecl, 4696 Expr **Args, 4697 unsigned NumArgs) { 4698 unsigned Idx = 0; 4699 bool Format = false; 4700 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); 4701 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { 4702 Idx = 2; 4703 Format = true; 4704 } 4705 else 4706 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4707 if (S.GetFormatNSStringIdx(I, Idx)) { 4708 Format = true; 4709 break; 4710 } 4711 } 4712 if (!Format || NumArgs <= Idx) 4713 return; 4714 const Expr *FormatExpr = Args[Idx]; 4715 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) 4716 FormatExpr = CSCE->getSubExpr(); 4717 const StringLiteral *FormatString; 4718 if (const ObjCStringLiteral *OSL = 4719 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) 4720 FormatString = OSL->getString(); 4721 else 4722 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); 4723 if (!FormatString) 4724 return; 4725 if (S.FormatStringHasSArg(FormatString)) { 4726 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) 4727 << "%s" << 1 << 1; 4728 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) 4729 << FDecl->getDeclName(); 4730 } 4731 } 4732 4733 /// Determine whether the given type has a non-null nullability annotation. 4734 static bool isNonNullType(ASTContext &ctx, QualType type) { 4735 if (auto nullability = type->getNullability(ctx)) 4736 return *nullability == NullabilityKind::NonNull; 4737 4738 return false; 4739 } 4740 4741 static void CheckNonNullArguments(Sema &S, 4742 const NamedDecl *FDecl, 4743 const FunctionProtoType *Proto, 4744 ArrayRef<const Expr *> Args, 4745 SourceLocation CallSiteLoc) { 4746 assert((FDecl || Proto) && "Need a function declaration or prototype"); 4747 4748 // Already checked by the constant evaluator. 4749 if (S.isConstantEvaluated()) 4750 return; 4751 // Check the attributes attached to the method/function itself. 4752 llvm::SmallBitVector NonNullArgs; 4753 if (FDecl) { 4754 // Handle the nonnull attribute on the function/method declaration itself. 4755 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { 4756 if (!NonNull->args_size()) { 4757 // Easy case: all pointer arguments are nonnull.
4758 for (const auto *Arg : Args) 4759 if (S.isValidPointerAttrType(Arg->getType())) 4760 CheckNonNullArgument(S, Arg, CallSiteLoc); 4761 return; 4762 } 4763 4764 for (const ParamIdx &Idx : NonNull->args()) { 4765 unsigned IdxAST = Idx.getASTIndex(); 4766 if (IdxAST >= Args.size()) 4767 continue; 4768 if (NonNullArgs.empty()) 4769 NonNullArgs.resize(Args.size()); 4770 NonNullArgs.set(IdxAST); 4771 } 4772 } 4773 } 4774 4775 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { 4776 // Handle the nonnull attribute on the parameters of the 4777 // function/method. 4778 ArrayRef<ParmVarDecl*> parms; 4779 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) 4780 parms = FD->parameters(); 4781 else 4782 parms = cast<ObjCMethodDecl>(FDecl)->parameters(); 4783 4784 unsigned ParamIndex = 0; 4785 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); 4786 I != E; ++I, ++ParamIndex) { 4787 const ParmVarDecl *PVD = *I; 4788 if (PVD->hasAttr<NonNullAttr>() || 4789 isNonNullType(S.Context, PVD->getType())) { 4790 if (NonNullArgs.empty()) 4791 NonNullArgs.resize(Args.size()); 4792 4793 NonNullArgs.set(ParamIndex); 4794 } 4795 } 4796 } else { 4797 // If we have a non-function, non-method declaration but no 4798 // function prototype, try to dig out the function prototype. 4799 if (!Proto) { 4800 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { 4801 QualType type = VD->getType().getNonReferenceType(); 4802 if (auto pointerType = type->getAs<PointerType>()) 4803 type = pointerType->getPointeeType(); 4804 else if (auto blockType = type->getAs<BlockPointerType>()) 4805 type = blockType->getPointeeType(); 4806 // FIXME: data member pointers? 4807 4808 // Dig out the function prototype, if there is one. 4809 Proto = type->getAs<FunctionProtoType>(); 4810 } 4811 } 4812 4813 // Fill in non-null argument information from the nullability 4814 // information on the parameter types (if we have them). 4815 if (Proto) { 4816 unsigned Index = 0; 4817 for (auto paramType : Proto->getParamTypes()) { 4818 if (isNonNullType(S.Context, paramType)) { 4819 if (NonNullArgs.empty()) 4820 NonNullArgs.resize(Args.size()); 4821 4822 NonNullArgs.set(Index); 4823 } 4824 4825 ++Index; 4826 } 4827 } 4828 } 4829 4830 // Check for non-null arguments. 4831 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); 4832 ArgIndex != ArgIndexEnd; ++ArgIndex) { 4833 if (NonNullArgs[ArgIndex]) 4834 CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); 4835 } 4836 } 4837 4838 /// Warn if a pointer or reference argument passed to a function points to an 4839 /// object that is less aligned than the parameter. This can happen when 4840 /// creating a typedef with a lower alignment than the original type and then 4841 /// calling functions defined in terms of the original type. 4842 void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl, 4843 StringRef ParamName, QualType ArgTy, 4844 QualType ParamTy) { 4845 4846 // If a function accepts a pointer or reference type 4847 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType()) 4848 return; 4849 4850 // If the parameter is a pointer type, get the pointee type for the 4851 // argument too. If the parameter is a reference type, don't try to get 4852 // the pointee type for the argument. 4853 if (ParamTy->isPointerType()) 4854 ArgTy = ArgTy->getPointeeType(); 4855 4856 // Remove reference or pointer 4857 ParamTy = ParamTy->getPointeeType(); 4858 4859 // Find expected alignment, and the actual alignment of the passed object. 
4860 // getTypeAlignInChars requires complete types 4861 if (ArgTy.isNull() || ParamTy->isIncompleteType() || 4862 ArgTy->isIncompleteType() || ParamTy->isUndeducedType() || 4863 ArgTy->isUndeducedType()) 4864 return; 4865 4866 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy); 4867 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy); 4868 4869 // If the argument is less aligned than the parameter, there is a 4870 // potential alignment issue. 4871 if (ArgAlign < ParamAlign) 4872 Diag(Loc, diag::warn_param_mismatched_alignment) 4873 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity() 4874 << ParamName << FDecl; 4875 } 4876 4877 /// Handles the checks for format strings, non-POD arguments to vararg 4878 /// functions, NULL arguments passed to non-NULL parameters, and diagnose_if 4879 /// attributes. 4880 void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, 4881 const Expr *ThisArg, ArrayRef<const Expr *> Args, 4882 bool IsMemberFunction, SourceLocation Loc, 4883 SourceRange Range, VariadicCallType CallType) { 4884 // FIXME: We should check as much as we can in the template definition. 4885 if (CurContext->isDependentContext()) 4886 return; 4887 4888 // Printf and scanf checking. 4889 llvm::SmallBitVector CheckedVarArgs; 4890 if (FDecl) { 4891 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { 4892 // Only create vector if there are format attributes. 4893 CheckedVarArgs.resize(Args.size()); 4894 4895 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, 4896 CheckedVarArgs); 4897 } 4898 } 4899 4900 // Refuse POD arguments that weren't caught by the format string 4901 // checks above. 4902 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl); 4903 if (CallType != VariadicDoesNotApply && 4904 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { 4905 unsigned NumParams = Proto ? Proto->getNumParams() 4906 : FDecl && isa<FunctionDecl>(FDecl) 4907 ? cast<FunctionDecl>(FDecl)->getNumParams() 4908 : FDecl && isa<ObjCMethodDecl>(FDecl) 4909 ? cast<ObjCMethodDecl>(FDecl)->param_size() 4910 : 0; 4911 4912 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { 4913 // Args[ArgIdx] can be null in malformed code. 4914 if (const Expr *Arg = Args[ArgIdx]) { 4915 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) 4916 checkVariadicArgument(Arg, CallType); 4917 } 4918 } 4919 } 4920 4921 if (FDecl || Proto) { 4922 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); 4923 4924 // Type safety checking. 4925 if (FDecl) { 4926 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) 4927 CheckArgumentWithTypeTag(I, Args, Loc); 4928 } 4929 } 4930 4931 // Check that passed arguments match the alignment of original arguments. 4932 // Try to get the missing prototype from the declaration. 4933 if (!Proto && FDecl) { 4934 const auto *FT = FDecl->getFunctionType(); 4935 if (isa_and_nonnull<FunctionProtoType>(FT)) 4936 Proto = cast<FunctionProtoType>(FDecl->getFunctionType()); 4937 } 4938 if (Proto) { 4939 // For variadic functions, we may have more args than parameters. 4940 // For some K&R functions, we may have less args than parameters. 4941 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size()); 4942 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) { 4943 // Args[ArgIdx] can be null in malformed code. 
4944 if (const Expr *Arg = Args[ArgIdx]) { 4945 if (Arg->containsErrors()) 4946 continue; 4947 4948 QualType ParamTy = Proto->getParamType(ArgIdx); 4949 QualType ArgTy = Arg->getType(); 4950 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1), 4951 ArgTy, ParamTy); 4952 } 4953 } 4954 } 4955 4956 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) { 4957 auto *AA = FDecl->getAttr<AllocAlignAttr>(); 4958 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()]; 4959 if (!Arg->isValueDependent()) { 4960 Expr::EvalResult Align; 4961 if (Arg->EvaluateAsInt(Align, Context)) { 4962 const llvm::APSInt &I = Align.Val.getInt(); 4963 if (!I.isPowerOf2()) 4964 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two) 4965 << Arg->getSourceRange(); 4966 4967 if (I > Sema::MaximumAlignment) 4968 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great) 4969 << Arg->getSourceRange() << Sema::MaximumAlignment; 4970 } 4971 } 4972 } 4973 4974 if (FD) 4975 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc); 4976 } 4977 4978 /// CheckConstructorCall - Check a constructor call for correctness and safety 4979 /// properties not enforced by the C type system. 4980 void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType, 4981 ArrayRef<const Expr *> Args, 4982 const FunctionProtoType *Proto, 4983 SourceLocation Loc) { 4984 VariadicCallType CallType = 4985 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; 4986 4987 auto *Ctor = cast<CXXConstructorDecl>(FDecl); 4988 CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType), 4989 Context.getPointerType(Ctor->getThisObjectType())); 4990 4991 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true, 4992 Loc, SourceRange(), CallType); 4993 } 4994 4995 /// CheckFunctionCall - Check a direct function call for various correctness 4996 /// and safety properties not strictly enforced by the C type system. 4997 bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, 4998 const FunctionProtoType *Proto) { 4999 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && 5000 isa<CXXMethodDecl>(FDecl); 5001 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || 5002 IsMemberOperatorCall; 5003 VariadicCallType CallType = getVariadicCallType(FDecl, Proto, 5004 TheCall->getCallee()); 5005 Expr** Args = TheCall->getArgs(); 5006 unsigned NumArgs = TheCall->getNumArgs(); 5007 5008 Expr *ImplicitThis = nullptr; 5009 if (IsMemberOperatorCall) { 5010 // If this is a call to a member operator, hide the first argument 5011 // from checkCall. 5012 // FIXME: Our choice of AST representation here is less than ideal. 5013 ImplicitThis = Args[0]; 5014 ++Args; 5015 --NumArgs; 5016 } else if (IsMemberFunction) 5017 ImplicitThis = 5018 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument(); 5019 5020 if (ImplicitThis) { 5021 // ImplicitThis may or may not be a pointer, depending on whether . or -> is 5022 // used. 
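// Illustrative sketch (hypothetical names): for 'obj.method(x)' the implicit
// object argument is 'obj' itself, a non-pointer, while 'ptr->method(x)'
// already supplies a pointer; the code below wraps the former in a pointer
// type so both forms take the same alignment-check path.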
5023 QualType ThisType = ImplicitThis->getType(); 5024 if (!ThisType->isPointerType()) { 5025 assert(!ThisType->isReferenceType()); 5026 ThisType = Context.getPointerType(ThisType); 5027 } 5028 5029 QualType ThisTypeFromDecl = 5030 Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType()); 5031 5032 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType, 5033 ThisTypeFromDecl); 5034 } 5035 5036 checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs), 5037 IsMemberFunction, TheCall->getRParenLoc(), 5038 TheCall->getCallee()->getSourceRange(), CallType); 5039 5040 IdentifierInfo *FnInfo = FDecl->getIdentifier(); 5041 // None of the checks below are needed for functions that don't have 5042 // simple names (e.g., C++ conversion functions). 5043 if (!FnInfo) 5044 return false; 5045 5046 CheckTCBEnforcement(TheCall, FDecl); 5047 5048 CheckAbsoluteValueFunction(TheCall, FDecl); 5049 CheckMaxUnsignedZero(TheCall, FDecl); 5050 5051 if (getLangOpts().ObjC) 5052 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); 5053 5054 unsigned CMId = FDecl->getMemoryFunctionKind(); 5055 5056 // Handle memory setting and copying functions. 5057 switch (CMId) { 5058 case 0: 5059 return false; 5060 case Builtin::BIstrlcpy: // fallthrough 5061 case Builtin::BIstrlcat: 5062 CheckStrlcpycatArguments(TheCall, FnInfo); 5063 break; 5064 case Builtin::BIstrncat: 5065 CheckStrncatArguments(TheCall, FnInfo); 5066 break; 5067 case Builtin::BIfree: 5068 CheckFreeArguments(TheCall); 5069 break; 5070 default: 5071 CheckMemaccessArguments(TheCall, CMId, FnInfo); 5072 } 5073 5074 return false; 5075 } 5076 5077 bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, 5078 ArrayRef<const Expr *> Args) { 5079 VariadicCallType CallType = 5080 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; 5081 5082 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args, 5083 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), 5084 CallType); 5085 5086 return false; 5087 } 5088 5089 bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, 5090 const FunctionProtoType *Proto) { 5091 QualType Ty; 5092 if (const auto *V = dyn_cast<VarDecl>(NDecl)) 5093 Ty = V->getType().getNonReferenceType(); 5094 else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) 5095 Ty = F->getType().getNonReferenceType(); 5096 else 5097 return false; 5098 5099 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && 5100 !Ty->isFunctionProtoType()) 5101 return false; 5102 5103 VariadicCallType CallType; 5104 if (!Proto || !Proto->isVariadic()) { 5105 CallType = VariadicDoesNotApply; 5106 } else if (Ty->isBlockPointerType()) { 5107 CallType = VariadicBlock; 5108 } else { // Ty->isFunctionPointerType() 5109 CallType = VariadicFunction; 5110 } 5111 5112 checkCall(NDecl, Proto, /*ThisArg=*/nullptr, 5113 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5114 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5115 TheCall->getCallee()->getSourceRange(), CallType); 5116 5117 return false; 5118 } 5119 5120 /// Checks function calls when a FunctionDecl or a NamedDecl is not available, 5121 /// such as function pointers returned from functions. 
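/// For example (sketch, hypothetical names): in 'getHandler()(fmt, n)' the
/// callee is a value of function-pointer type rather than a declaration, so
/// only the prototype-driven checks in checkCall apply here.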
5122 bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { 5123 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, 5124 TheCall->getCallee()); 5125 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, 5126 llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), 5127 /*IsMemberFunction=*/false, TheCall->getRParenLoc(), 5128 TheCall->getCallee()->getSourceRange(), CallType); 5129 5130 return false; 5131 } 5132 5133 static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) { 5134 if (!llvm::isValidAtomicOrderingCABI(Ordering)) 5135 return false; 5136 5137 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering; 5138 switch (Op) { 5139 case AtomicExpr::AO__c11_atomic_init: 5140 case AtomicExpr::AO__opencl_atomic_init: 5141 llvm_unreachable("There is no ordering argument for an init"); 5142 5143 case AtomicExpr::AO__c11_atomic_load: 5144 case AtomicExpr::AO__opencl_atomic_load: 5145 case AtomicExpr::AO__atomic_load_n: 5146 case AtomicExpr::AO__atomic_load: 5147 return OrderingCABI != llvm::AtomicOrderingCABI::release && 5148 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5149 5150 case AtomicExpr::AO__c11_atomic_store: 5151 case AtomicExpr::AO__opencl_atomic_store: 5152 case AtomicExpr::AO__atomic_store: 5153 case AtomicExpr::AO__atomic_store_n: 5154 return OrderingCABI != llvm::AtomicOrderingCABI::consume && 5155 OrderingCABI != llvm::AtomicOrderingCABI::acquire && 5156 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel; 5157 5158 default: 5159 return true; 5160 } 5161 } 5162 5163 ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, 5164 AtomicExpr::AtomicOp Op) { 5165 CallExpr *TheCall = cast<CallExpr>(TheCallResult.get()); 5166 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 5167 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; 5168 return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, 5169 DRE->getSourceRange(), TheCall->getRParenLoc(), Args, 5170 Op); 5171 } 5172 5173 ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, 5174 SourceLocation RParenLoc, MultiExprArg Args, 5175 AtomicExpr::AtomicOp Op, 5176 AtomicArgumentOrder ArgOrder) { 5177 // All the non-OpenCL operations take one of the following forms. 5178 // The OpenCL operations take the __c11 forms with one extra argument for 5179 // synchronization scope. 
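// For example (illustrative only): __c11_atomic_compare_exchange_strong
// matches the C11CmpXchg form below and takes 5 arguments, while the
// corresponding __opencl_atomic_* builtin takes a sixth argument for the
// synchronization scope (see AdjustedNumArgs further down).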
5180 enum { 5181 // C __c11_atomic_init(A *, C) 5182 Init, 5183 5184 // C __c11_atomic_load(A *, int) 5185 Load, 5186 5187 // void __atomic_load(A *, CP, int) 5188 LoadCopy, 5189 5190 // void __atomic_store(A *, CP, int) 5191 Copy, 5192 5193 // C __c11_atomic_add(A *, M, int) 5194 Arithmetic, 5195 5196 // C __atomic_exchange_n(A *, CP, int) 5197 Xchg, 5198 5199 // void __atomic_exchange(A *, C *, CP, int) 5200 GNUXchg, 5201 5202 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int) 5203 C11CmpXchg, 5204 5205 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int) 5206 GNUCmpXchg 5207 } Form = Init; 5208 5209 const unsigned NumForm = GNUCmpXchg + 1; 5210 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; 5211 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; 5212 // where: 5213 // C is an appropriate type, 5214 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, 5215 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise, 5216 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and 5217 // the int parameters are for orderings. 5218 5219 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm 5220 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, 5221 "need to update code for modified forms"); 5222 static_assert(AtomicExpr::AO__c11_atomic_init == 0 && 5223 AtomicExpr::AO__c11_atomic_fetch_min + 1 == 5224 AtomicExpr::AO__atomic_load, 5225 "need to update code for modified C11 atomics"); 5226 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init && 5227 Op <= AtomicExpr::AO__opencl_atomic_fetch_max; 5228 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && 5229 Op <= AtomicExpr::AO__c11_atomic_fetch_min) || 5230 IsOpenCL; 5231 bool IsN = Op == AtomicExpr::AO__atomic_load_n || 5232 Op == AtomicExpr::AO__atomic_store_n || 5233 Op == AtomicExpr::AO__atomic_exchange_n || 5234 Op == AtomicExpr::AO__atomic_compare_exchange_n; 5235 bool IsAddSub = false; 5236 5237 switch (Op) { 5238 case AtomicExpr::AO__c11_atomic_init: 5239 case AtomicExpr::AO__opencl_atomic_init: 5240 Form = Init; 5241 break; 5242 5243 case AtomicExpr::AO__c11_atomic_load: 5244 case AtomicExpr::AO__opencl_atomic_load: 5245 case AtomicExpr::AO__atomic_load_n: 5246 Form = Load; 5247 break; 5248 5249 case AtomicExpr::AO__atomic_load: 5250 Form = LoadCopy; 5251 break; 5252 5253 case AtomicExpr::AO__c11_atomic_store: 5254 case AtomicExpr::AO__opencl_atomic_store: 5255 case AtomicExpr::AO__atomic_store: 5256 case AtomicExpr::AO__atomic_store_n: 5257 Form = Copy; 5258 break; 5259 5260 case AtomicExpr::AO__c11_atomic_fetch_add: 5261 case AtomicExpr::AO__c11_atomic_fetch_sub: 5262 case AtomicExpr::AO__opencl_atomic_fetch_add: 5263 case AtomicExpr::AO__opencl_atomic_fetch_sub: 5264 case AtomicExpr::AO__atomic_fetch_add: 5265 case AtomicExpr::AO__atomic_fetch_sub: 5266 case AtomicExpr::AO__atomic_add_fetch: 5267 case AtomicExpr::AO__atomic_sub_fetch: 5268 IsAddSub = true; 5269 Form = Arithmetic; 5270 break; 5271 case AtomicExpr::AO__c11_atomic_fetch_and: 5272 case AtomicExpr::AO__c11_atomic_fetch_or: 5273 case AtomicExpr::AO__c11_atomic_fetch_xor: 5274 case AtomicExpr::AO__opencl_atomic_fetch_and: 5275 case AtomicExpr::AO__opencl_atomic_fetch_or: 5276 case AtomicExpr::AO__opencl_atomic_fetch_xor: 5277 case AtomicExpr::AO__atomic_fetch_and: 5278 case AtomicExpr::AO__atomic_fetch_or: 5279 case AtomicExpr::AO__atomic_fetch_xor: 5280 case AtomicExpr::AO__atomic_fetch_nand: 5281 case AtomicExpr::AO__atomic_and_fetch: 5282 case 
AtomicExpr::AO__atomic_or_fetch: 5283 case AtomicExpr::AO__atomic_xor_fetch: 5284 case AtomicExpr::AO__atomic_nand_fetch: 5285 Form = Arithmetic; 5286 break; 5287 case AtomicExpr::AO__c11_atomic_fetch_min: 5288 case AtomicExpr::AO__c11_atomic_fetch_max: 5289 case AtomicExpr::AO__opencl_atomic_fetch_min: 5290 case AtomicExpr::AO__opencl_atomic_fetch_max: 5291 case AtomicExpr::AO__atomic_min_fetch: 5292 case AtomicExpr::AO__atomic_max_fetch: 5293 case AtomicExpr::AO__atomic_fetch_min: 5294 case AtomicExpr::AO__atomic_fetch_max: 5295 Form = Arithmetic; 5296 break; 5297 5298 case AtomicExpr::AO__c11_atomic_exchange: 5299 case AtomicExpr::AO__opencl_atomic_exchange: 5300 case AtomicExpr::AO__atomic_exchange_n: 5301 Form = Xchg; 5302 break; 5303 5304 case AtomicExpr::AO__atomic_exchange: 5305 Form = GNUXchg; 5306 break; 5307 5308 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 5309 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 5310 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong: 5311 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak: 5312 Form = C11CmpXchg; 5313 break; 5314 5315 case AtomicExpr::AO__atomic_compare_exchange: 5316 case AtomicExpr::AO__atomic_compare_exchange_n: 5317 Form = GNUCmpXchg; 5318 break; 5319 } 5320 5321 unsigned AdjustedNumArgs = NumArgs[Form]; 5322 if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init) 5323 ++AdjustedNumArgs; 5324 // Check we have the right number of arguments. 5325 if (Args.size() < AdjustedNumArgs) { 5326 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args) 5327 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5328 << ExprRange; 5329 return ExprError(); 5330 } else if (Args.size() > AdjustedNumArgs) { 5331 Diag(Args[AdjustedNumArgs]->getBeginLoc(), 5332 diag::err_typecheck_call_too_many_args) 5333 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size()) 5334 << ExprRange; 5335 return ExprError(); 5336 } 5337 5338 // Inspect the first argument of the atomic operation. 5339 Expr *Ptr = Args[0]; 5340 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr); 5341 if (ConvertedPtr.isInvalid()) 5342 return ExprError(); 5343 5344 Ptr = ConvertedPtr.get(); 5345 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); 5346 if (!pointerType) { 5347 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer) 5348 << Ptr->getType() << Ptr->getSourceRange(); 5349 return ExprError(); 5350 } 5351 5352 // For a __c11 builtin, this should be a pointer to an _Atomic type. 5353 QualType AtomTy = pointerType->getPointeeType(); // 'A' 5354 QualType ValType = AtomTy; // 'C' 5355 if (IsC11) { 5356 if (!AtomTy->isAtomicType()) { 5357 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) 5358 << Ptr->getType() << Ptr->getSourceRange(); 5359 return ExprError(); 5360 } 5361 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) || 5362 AtomTy.getAddressSpace() == LangAS::opencl_constant) { 5363 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic) 5364 << (AtomTy.isConstQualified() ? 
0 : 1) << Ptr->getType() 5365 << Ptr->getSourceRange(); 5366 return ExprError(); 5367 } 5368 ValType = AtomTy->castAs<AtomicType>()->getValueType(); 5369 } else if (Form != Load && Form != LoadCopy) { 5370 if (ValType.isConstQualified()) { 5371 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer) 5372 << Ptr->getType() << Ptr->getSourceRange(); 5373 return ExprError(); 5374 } 5375 } 5376 5377 // For an arithmetic operation, the implied arithmetic must be well-formed. 5378 if (Form == Arithmetic) { 5379 // gcc does not enforce these rules for GNU atomics, but we do so for 5380 // sanity. 5381 auto IsAllowedValueType = [&](QualType ValType) { 5382 if (ValType->isIntegerType()) 5383 return true; 5384 if (ValType->isPointerType()) 5385 return true; 5386 if (!ValType->isFloatingType()) 5387 return false; 5388 // LLVM Parser does not allow atomicrmw with x86_fp80 type. 5389 if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) && 5390 &Context.getTargetInfo().getLongDoubleFormat() == 5391 &llvm::APFloat::x87DoubleExtended()) 5392 return false; 5393 return true; 5394 }; 5395 if (IsAddSub && !IsAllowedValueType(ValType)) { 5396 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp) 5397 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5398 return ExprError(); 5399 } 5400 if (!IsAddSub && !ValType->isIntegerType()) { 5401 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int) 5402 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5403 return ExprError(); 5404 } 5405 if (IsC11 && ValType->isPointerType() && 5406 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(), 5407 diag::err_incomplete_type)) { 5408 return ExprError(); 5409 } 5410 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { 5411 // For __atomic_*_n operations, the value type must be a scalar integral or 5412 // pointer type which is 1, 2, 4, 8 or 16 bytes in length. 5413 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr) 5414 << IsC11 << Ptr->getType() << Ptr->getSourceRange(); 5415 return ExprError(); 5416 } 5417 5418 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && 5419 !AtomTy->isScalarType()) { 5420 // For GNU atomics, require a trivially-copyable type. This is not part of 5421 // the GNU atomics specification, but we enforce it for sanity. 5422 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy) 5423 << Ptr->getType() << Ptr->getSourceRange(); 5424 return ExprError(); 5425 } 5426 5427 switch (ValType.getObjCLifetime()) { 5428 case Qualifiers::OCL_None: 5429 case Qualifiers::OCL_ExplicitNone: 5430 // okay 5431 break; 5432 5433 case Qualifiers::OCL_Weak: 5434 case Qualifiers::OCL_Strong: 5435 case Qualifiers::OCL_Autoreleasing: 5436 // FIXME: Can this happen? By this point, ValType should be known 5437 // to be trivially copyable. 5438 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership) 5439 << ValType << Ptr->getSourceRange(); 5440 return ExprError(); 5441 } 5442 5443 // All atomic operations have an overload which takes a pointer to a volatile 5444 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself 5445 // into the result or the other operands. Similarly atomic_load takes a 5446 // pointer to a const 'A'. 
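// For instance (sketch): an atomic load through 'volatile _Atomic(int) *p'
// should still produce a plain 'int' result, which is why the local volatile
// and const qualifiers are dropped from ValType just below.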
5447 ValType.removeLocalVolatile(); 5448 ValType.removeLocalConst(); 5449 QualType ResultType = ValType; 5450 if (Form == Copy || Form == LoadCopy || Form == GNUXchg || 5451 Form == Init) 5452 ResultType = Context.VoidTy; 5453 else if (Form == C11CmpXchg || Form == GNUCmpXchg) 5454 ResultType = Context.BoolTy; 5455 5456 // The type of a parameter passed 'by value'. In the GNU atomics, such 5457 // arguments are actually passed as pointers. 5458 QualType ByValType = ValType; // 'CP' 5459 bool IsPassedByAddress = false; 5460 if (!IsC11 && !IsN) { 5461 ByValType = Ptr->getType(); 5462 IsPassedByAddress = true; 5463 } 5464 5465 SmallVector<Expr *, 5> APIOrderedArgs; 5466 if (ArgOrder == Sema::AtomicArgumentOrder::AST) { 5467 APIOrderedArgs.push_back(Args[0]); 5468 switch (Form) { 5469 case Init: 5470 case Load: 5471 APIOrderedArgs.push_back(Args[1]); // Val1/Order 5472 break; 5473 case LoadCopy: 5474 case Copy: 5475 case Arithmetic: 5476 case Xchg: 5477 APIOrderedArgs.push_back(Args[2]); // Val1 5478 APIOrderedArgs.push_back(Args[1]); // Order 5479 break; 5480 case GNUXchg: 5481 APIOrderedArgs.push_back(Args[2]); // Val1 5482 APIOrderedArgs.push_back(Args[3]); // Val2 5483 APIOrderedArgs.push_back(Args[1]); // Order 5484 break; 5485 case C11CmpXchg: 5486 APIOrderedArgs.push_back(Args[2]); // Val1 5487 APIOrderedArgs.push_back(Args[4]); // Val2 5488 APIOrderedArgs.push_back(Args[1]); // Order 5489 APIOrderedArgs.push_back(Args[3]); // OrderFail 5490 break; 5491 case GNUCmpXchg: 5492 APIOrderedArgs.push_back(Args[2]); // Val1 5493 APIOrderedArgs.push_back(Args[4]); // Val2 5494 APIOrderedArgs.push_back(Args[5]); // Weak 5495 APIOrderedArgs.push_back(Args[1]); // Order 5496 APIOrderedArgs.push_back(Args[3]); // OrderFail 5497 break; 5498 } 5499 } else 5500 APIOrderedArgs.append(Args.begin(), Args.end()); 5501 5502 // The first argument's non-CV pointer type is used to deduce the type of 5503 // subsequent arguments, except for: 5504 // - weak flag (always converted to bool) 5505 // - memory order (always converted to int) 5506 // - scope (always converted to int) 5507 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) { 5508 QualType Ty; 5509 if (i < NumVals[Form] + 1) { 5510 switch (i) { 5511 case 0: 5512 // The first argument is always a pointer. It has a fixed type. 5513 // It is always dereferenced, a nullptr is undefined. 5514 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5515 // Nothing else to do: we already know all we want about this pointer. 5516 continue; 5517 case 1: 5518 // The second argument is the non-atomic operand. For arithmetic, this 5519 // is always passed by value, and for a compare_exchange it is always 5520 // passed by address. For the rest, GNU uses by-address and C11 uses 5521 // by-value. 5522 assert(Form != Load); 5523 if (Form == Arithmetic && ValType->isPointerType()) 5524 Ty = Context.getPointerDiffType(); 5525 else if (Form == Init || Form == Arithmetic) 5526 Ty = ValType; 5527 else if (Form == Copy || Form == Xchg) { 5528 if (IsPassedByAddress) { 5529 // The value pointer is always dereferenced, a nullptr is undefined. 5530 CheckNonNullArgument(*this, APIOrderedArgs[i], 5531 ExprRange.getBegin()); 5532 } 5533 Ty = ByValType; 5534 } else { 5535 Expr *ValArg = APIOrderedArgs[i]; 5536 // The value pointer is always dereferenced, a nullptr is undefined. 5537 CheckNonNullArgument(*this, ValArg, ExprRange.getBegin()); 5538 LangAS AS = LangAS::Default; 5539 // Keep address space of non-atomic pointer type. 
5540 if (const PointerType *PtrTy = 5541 ValArg->getType()->getAs<PointerType>()) { 5542 AS = PtrTy->getPointeeType().getAddressSpace(); 5543 } 5544 Ty = Context.getPointerType( 5545 Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS)); 5546 } 5547 break; 5548 case 2: 5549 // The third argument to compare_exchange / GNU exchange is the desired 5550 // value, either by-value (for the C11 and *_n variant) or as a pointer. 5551 if (IsPassedByAddress) 5552 CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin()); 5553 Ty = ByValType; 5554 break; 5555 case 3: 5556 // The fourth argument to GNU compare_exchange is a 'weak' flag. 5557 Ty = Context.BoolTy; 5558 break; 5559 } 5560 } else { 5561 // The order(s) and scope are always converted to int. 5562 Ty = Context.IntTy; 5563 } 5564 5565 InitializedEntity Entity = 5566 InitializedEntity::InitializeParameter(Context, Ty, false); 5567 ExprResult Arg = APIOrderedArgs[i]; 5568 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 5569 if (Arg.isInvalid()) 5570 return true; 5571 APIOrderedArgs[i] = Arg.get(); 5572 } 5573 5574 // Permute the arguments into a 'consistent' order. 5575 SmallVector<Expr*, 5> SubExprs; 5576 SubExprs.push_back(Ptr); 5577 switch (Form) { 5578 case Init: 5579 // Note, AtomicExpr::getVal1() has a special case for this atomic. 5580 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5581 break; 5582 case Load: 5583 SubExprs.push_back(APIOrderedArgs[1]); // Order 5584 break; 5585 case LoadCopy: 5586 case Copy: 5587 case Arithmetic: 5588 case Xchg: 5589 SubExprs.push_back(APIOrderedArgs[2]); // Order 5590 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5591 break; 5592 case GNUXchg: 5593 // Note, AtomicExpr::getVal2() has a special case for this atomic. 5594 SubExprs.push_back(APIOrderedArgs[3]); // Order 5595 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5596 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5597 break; 5598 case C11CmpXchg: 5599 SubExprs.push_back(APIOrderedArgs[3]); // Order 5600 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5601 SubExprs.push_back(APIOrderedArgs[4]); // OrderFail 5602 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5603 break; 5604 case GNUCmpXchg: 5605 SubExprs.push_back(APIOrderedArgs[4]); // Order 5606 SubExprs.push_back(APIOrderedArgs[1]); // Val1 5607 SubExprs.push_back(APIOrderedArgs[5]); // OrderFail 5608 SubExprs.push_back(APIOrderedArgs[2]); // Val2 5609 SubExprs.push_back(APIOrderedArgs[3]); // Weak 5610 break; 5611 } 5612 5613 if (SubExprs.size() >= 2 && Form != Init) { 5614 if (Optional<llvm::APSInt> Result = 5615 SubExprs[1]->getIntegerConstantExpr(Context)) 5616 if (!isValidOrderingForOp(Result->getSExtValue(), Op)) 5617 Diag(SubExprs[1]->getBeginLoc(), 5618 diag::warn_atomic_op_has_invalid_memory_order) 5619 << SubExprs[1]->getSourceRange(); 5620 } 5621 5622 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) { 5623 auto *Scope = Args[Args.size() - 1]; 5624 if (Optional<llvm::APSInt> Result = 5625 Scope->getIntegerConstantExpr(Context)) { 5626 if (!ScopeModel->isValid(Result->getZExtValue())) 5627 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope) 5628 << Scope->getSourceRange(); 5629 } 5630 SubExprs.push_back(Scope); 5631 } 5632 5633 AtomicExpr *AE = new (Context) 5634 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc); 5635 5636 if ((Op == AtomicExpr::AO__c11_atomic_load || 5637 Op == AtomicExpr::AO__c11_atomic_store || 5638 Op == AtomicExpr::AO__opencl_atomic_load || 5639 Op == 
AtomicExpr::AO__opencl_atomic_store ) && 5640 Context.AtomicUsesUnsupportedLibcall(AE)) 5641 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib) 5642 << ((Op == AtomicExpr::AO__c11_atomic_load || 5643 Op == AtomicExpr::AO__opencl_atomic_load) 5644 ? 0 5645 : 1); 5646 5647 if (ValType->isExtIntType()) { 5648 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit); 5649 return ExprError(); 5650 } 5651 5652 return AE; 5653 } 5654 5655 /// checkBuiltinArgument - Given a call to a builtin function, perform 5656 /// normal type-checking on the given argument, updating the call in 5657 /// place. This is useful when a builtin function requires custom 5658 /// type-checking for some of its arguments but not necessarily all of 5659 /// them. 5660 /// 5661 /// Returns true on error. 5662 static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 5663 FunctionDecl *Fn = E->getDirectCallee(); 5664 assert(Fn && "builtin call without direct callee!"); 5665 5666 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 5667 InitializedEntity Entity = 5668 InitializedEntity::InitializeParameter(S.Context, Param); 5669 5670 ExprResult Arg = E->getArg(ArgIndex); 5671 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 5672 if (Arg.isInvalid()) 5673 return true; 5674 5675 E->setArg(ArgIndex, Arg.get()); 5676 return false; 5677 } 5678 5679 /// We have a call to a function like __sync_fetch_and_add, which is an 5680 /// overloaded function based on the pointer type of its first argument. 5681 /// The main BuildCallExpr routines have already promoted the types of 5682 /// arguments because all of these calls are prototyped as void(...). 5683 /// 5684 /// This function goes through and does final semantic checking for these 5685 /// builtins, as well as generating any warnings. 5686 ExprResult 5687 Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 5688 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 5689 Expr *Callee = TheCall->getCallee(); 5690 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 5691 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 5692 5693 // Ensure that we have at least one argument to do type inference from. 5694 if (TheCall->getNumArgs() < 1) { 5695 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5696 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 5697 return ExprError(); 5698 } 5699 5700 // Inspect the first argument of the atomic builtin. This should always be 5701 // a pointer type, whose element is an integral scalar or pointer type. 5702 // Because it is a pointer type, we don't have to worry about any implicit 5703 // casts here. 5704 // FIXME: We don't allow floating point scalars as input.
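// Illustrative sketch (hypothetical variable): for 'long *p',
// __sync_fetch_and_add(p, 1) deduces the value type 'long' from the pointee,
// and on a target where 'long' is 8 bytes the call is later rewritten to the
// concrete __sync_fetch_and_add_8 builtin.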
5705 Expr *FirstArg = TheCall->getArg(0); 5706 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 5707 if (FirstArgResult.isInvalid()) 5708 return ExprError(); 5709 FirstArg = FirstArgResult.get(); 5710 TheCall->setArg(0, FirstArg); 5711 5712 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 5713 if (!pointerType) { 5714 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 5715 << FirstArg->getType() << FirstArg->getSourceRange(); 5716 return ExprError(); 5717 } 5718 5719 QualType ValType = pointerType->getPointeeType(); 5720 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 5721 !ValType->isBlockPointerType()) { 5722 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 5723 << FirstArg->getType() << FirstArg->getSourceRange(); 5724 return ExprError(); 5725 } 5726 5727 if (ValType.isConstQualified()) { 5728 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 5729 << FirstArg->getType() << FirstArg->getSourceRange(); 5730 return ExprError(); 5731 } 5732 5733 switch (ValType.getObjCLifetime()) { 5734 case Qualifiers::OCL_None: 5735 case Qualifiers::OCL_ExplicitNone: 5736 // okay 5737 break; 5738 5739 case Qualifiers::OCL_Weak: 5740 case Qualifiers::OCL_Strong: 5741 case Qualifiers::OCL_Autoreleasing: 5742 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 5743 << ValType << FirstArg->getSourceRange(); 5744 return ExprError(); 5745 } 5746 5747 // Strip any qualifiers off ValType. 5748 ValType = ValType.getUnqualifiedType(); 5749 5750 // The majority of builtins return a value, but a few have special return 5751 // types, so allow them to override appropriately below. 5752 QualType ResultType = ValType; 5753 5754 // We need to figure out which concrete builtin this maps onto. For example, 5755 // __sync_fetch_and_add with a 2 byte object turns into 5756 // __sync_fetch_and_add_2. 5757 #define BUILTIN_ROW(x) \ 5758 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 5759 Builtin::BI##x##_8, Builtin::BI##x##_16 } 5760 5761 static const unsigned BuiltinIndices[][5] = { 5762 BUILTIN_ROW(__sync_fetch_and_add), 5763 BUILTIN_ROW(__sync_fetch_and_sub), 5764 BUILTIN_ROW(__sync_fetch_and_or), 5765 BUILTIN_ROW(__sync_fetch_and_and), 5766 BUILTIN_ROW(__sync_fetch_and_xor), 5767 BUILTIN_ROW(__sync_fetch_and_nand), 5768 5769 BUILTIN_ROW(__sync_add_and_fetch), 5770 BUILTIN_ROW(__sync_sub_and_fetch), 5771 BUILTIN_ROW(__sync_and_and_fetch), 5772 BUILTIN_ROW(__sync_or_and_fetch), 5773 BUILTIN_ROW(__sync_xor_and_fetch), 5774 BUILTIN_ROW(__sync_nand_and_fetch), 5775 5776 BUILTIN_ROW(__sync_val_compare_and_swap), 5777 BUILTIN_ROW(__sync_bool_compare_and_swap), 5778 BUILTIN_ROW(__sync_lock_test_and_set), 5779 BUILTIN_ROW(__sync_lock_release), 5780 BUILTIN_ROW(__sync_swap) 5781 }; 5782 #undef BUILTIN_ROW 5783 5784 // Determine the index of the size. 5785 unsigned SizeIndex; 5786 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 5787 case 1: SizeIndex = 0; break; 5788 case 2: SizeIndex = 1; break; 5789 case 4: SizeIndex = 2; break; 5790 case 8: SizeIndex = 3; break; 5791 case 16: SizeIndex = 4; break; 5792 default: 5793 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 5794 << FirstArg->getType() << FirstArg->getSourceRange(); 5795 return ExprError(); 5796 } 5797 5798 // Each of these builtins has one pointer argument, followed by some number of 5799 // values (0, 1 or 2) followed by a potentially empty varargs list of stuff 5800 // that we ignore.
Find out which row of BuiltinIndices to read from as well 5801 // as the number of fixed args. 5802 unsigned BuiltinID = FDecl->getBuiltinID(); 5803 unsigned BuiltinIndex, NumFixed = 1; 5804 bool WarnAboutSemanticsChange = false; 5805 switch (BuiltinID) { 5806 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 5807 case Builtin::BI__sync_fetch_and_add: 5808 case Builtin::BI__sync_fetch_and_add_1: 5809 case Builtin::BI__sync_fetch_and_add_2: 5810 case Builtin::BI__sync_fetch_and_add_4: 5811 case Builtin::BI__sync_fetch_and_add_8: 5812 case Builtin::BI__sync_fetch_and_add_16: 5813 BuiltinIndex = 0; 5814 break; 5815 5816 case Builtin::BI__sync_fetch_and_sub: 5817 case Builtin::BI__sync_fetch_and_sub_1: 5818 case Builtin::BI__sync_fetch_and_sub_2: 5819 case Builtin::BI__sync_fetch_and_sub_4: 5820 case Builtin::BI__sync_fetch_and_sub_8: 5821 case Builtin::BI__sync_fetch_and_sub_16: 5822 BuiltinIndex = 1; 5823 break; 5824 5825 case Builtin::BI__sync_fetch_and_or: 5826 case Builtin::BI__sync_fetch_and_or_1: 5827 case Builtin::BI__sync_fetch_and_or_2: 5828 case Builtin::BI__sync_fetch_and_or_4: 5829 case Builtin::BI__sync_fetch_and_or_8: 5830 case Builtin::BI__sync_fetch_and_or_16: 5831 BuiltinIndex = 2; 5832 break; 5833 5834 case Builtin::BI__sync_fetch_and_and: 5835 case Builtin::BI__sync_fetch_and_and_1: 5836 case Builtin::BI__sync_fetch_and_and_2: 5837 case Builtin::BI__sync_fetch_and_and_4: 5838 case Builtin::BI__sync_fetch_and_and_8: 5839 case Builtin::BI__sync_fetch_and_and_16: 5840 BuiltinIndex = 3; 5841 break; 5842 5843 case Builtin::BI__sync_fetch_and_xor: 5844 case Builtin::BI__sync_fetch_and_xor_1: 5845 case Builtin::BI__sync_fetch_and_xor_2: 5846 case Builtin::BI__sync_fetch_and_xor_4: 5847 case Builtin::BI__sync_fetch_and_xor_8: 5848 case Builtin::BI__sync_fetch_and_xor_16: 5849 BuiltinIndex = 4; 5850 break; 5851 5852 case Builtin::BI__sync_fetch_and_nand: 5853 case Builtin::BI__sync_fetch_and_nand_1: 5854 case Builtin::BI__sync_fetch_and_nand_2: 5855 case Builtin::BI__sync_fetch_and_nand_4: 5856 case Builtin::BI__sync_fetch_and_nand_8: 5857 case Builtin::BI__sync_fetch_and_nand_16: 5858 BuiltinIndex = 5; 5859 WarnAboutSemanticsChange = true; 5860 break; 5861 5862 case Builtin::BI__sync_add_and_fetch: 5863 case Builtin::BI__sync_add_and_fetch_1: 5864 case Builtin::BI__sync_add_and_fetch_2: 5865 case Builtin::BI__sync_add_and_fetch_4: 5866 case Builtin::BI__sync_add_and_fetch_8: 5867 case Builtin::BI__sync_add_and_fetch_16: 5868 BuiltinIndex = 6; 5869 break; 5870 5871 case Builtin::BI__sync_sub_and_fetch: 5872 case Builtin::BI__sync_sub_and_fetch_1: 5873 case Builtin::BI__sync_sub_and_fetch_2: 5874 case Builtin::BI__sync_sub_and_fetch_4: 5875 case Builtin::BI__sync_sub_and_fetch_8: 5876 case Builtin::BI__sync_sub_and_fetch_16: 5877 BuiltinIndex = 7; 5878 break; 5879 5880 case Builtin::BI__sync_and_and_fetch: 5881 case Builtin::BI__sync_and_and_fetch_1: 5882 case Builtin::BI__sync_and_and_fetch_2: 5883 case Builtin::BI__sync_and_and_fetch_4: 5884 case Builtin::BI__sync_and_and_fetch_8: 5885 case Builtin::BI__sync_and_and_fetch_16: 5886 BuiltinIndex = 8; 5887 break; 5888 5889 case Builtin::BI__sync_or_and_fetch: 5890 case Builtin::BI__sync_or_and_fetch_1: 5891 case Builtin::BI__sync_or_and_fetch_2: 5892 case Builtin::BI__sync_or_and_fetch_4: 5893 case Builtin::BI__sync_or_and_fetch_8: 5894 case Builtin::BI__sync_or_and_fetch_16: 5895 BuiltinIndex = 9; 5896 break; 5897 5898 case Builtin::BI__sync_xor_and_fetch: 5899 case Builtin::BI__sync_xor_and_fetch_1: 5900 case 
Builtin::BI__sync_xor_and_fetch_2: 5901 case Builtin::BI__sync_xor_and_fetch_4: 5902 case Builtin::BI__sync_xor_and_fetch_8: 5903 case Builtin::BI__sync_xor_and_fetch_16: 5904 BuiltinIndex = 10; 5905 break; 5906 5907 case Builtin::BI__sync_nand_and_fetch: 5908 case Builtin::BI__sync_nand_and_fetch_1: 5909 case Builtin::BI__sync_nand_and_fetch_2: 5910 case Builtin::BI__sync_nand_and_fetch_4: 5911 case Builtin::BI__sync_nand_and_fetch_8: 5912 case Builtin::BI__sync_nand_and_fetch_16: 5913 BuiltinIndex = 11; 5914 WarnAboutSemanticsChange = true; 5915 break; 5916 5917 case Builtin::BI__sync_val_compare_and_swap: 5918 case Builtin::BI__sync_val_compare_and_swap_1: 5919 case Builtin::BI__sync_val_compare_and_swap_2: 5920 case Builtin::BI__sync_val_compare_and_swap_4: 5921 case Builtin::BI__sync_val_compare_and_swap_8: 5922 case Builtin::BI__sync_val_compare_and_swap_16: 5923 BuiltinIndex = 12; 5924 NumFixed = 2; 5925 break; 5926 5927 case Builtin::BI__sync_bool_compare_and_swap: 5928 case Builtin::BI__sync_bool_compare_and_swap_1: 5929 case Builtin::BI__sync_bool_compare_and_swap_2: 5930 case Builtin::BI__sync_bool_compare_and_swap_4: 5931 case Builtin::BI__sync_bool_compare_and_swap_8: 5932 case Builtin::BI__sync_bool_compare_and_swap_16: 5933 BuiltinIndex = 13; 5934 NumFixed = 2; 5935 ResultType = Context.BoolTy; 5936 break; 5937 5938 case Builtin::BI__sync_lock_test_and_set: 5939 case Builtin::BI__sync_lock_test_and_set_1: 5940 case Builtin::BI__sync_lock_test_and_set_2: 5941 case Builtin::BI__sync_lock_test_and_set_4: 5942 case Builtin::BI__sync_lock_test_and_set_8: 5943 case Builtin::BI__sync_lock_test_and_set_16: 5944 BuiltinIndex = 14; 5945 break; 5946 5947 case Builtin::BI__sync_lock_release: 5948 case Builtin::BI__sync_lock_release_1: 5949 case Builtin::BI__sync_lock_release_2: 5950 case Builtin::BI__sync_lock_release_4: 5951 case Builtin::BI__sync_lock_release_8: 5952 case Builtin::BI__sync_lock_release_16: 5953 BuiltinIndex = 15; 5954 NumFixed = 0; 5955 ResultType = Context.VoidTy; 5956 break; 5957 5958 case Builtin::BI__sync_swap: 5959 case Builtin::BI__sync_swap_1: 5960 case Builtin::BI__sync_swap_2: 5961 case Builtin::BI__sync_swap_4: 5962 case Builtin::BI__sync_swap_8: 5963 case Builtin::BI__sync_swap_16: 5964 BuiltinIndex = 16; 5965 break; 5966 } 5967 5968 // Now that we know how many fixed arguments we expect, first check that we 5969 // have at least that many. 5970 if (TheCall->getNumArgs() < 1+NumFixed) { 5971 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 5972 << 0 << 1 + NumFixed << TheCall->getNumArgs() 5973 << Callee->getSourceRange(); 5974 return ExprError(); 5975 } 5976 5977 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 5978 << Callee->getSourceRange(); 5979 5980 if (WarnAboutSemanticsChange) { 5981 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 5982 << Callee->getSourceRange(); 5983 } 5984 5985 // Get the decl for the concrete builtin from this, we can tell what the 5986 // concrete integer type we should convert to is. 5987 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 5988 const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 5989 FunctionDecl *NewBuiltinDecl; 5990 if (NewBuiltinID == BuiltinID) 5991 NewBuiltinDecl = FDecl; 5992 else { 5993 // Perform builtin lookup to avoid redeclaring it. 
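    // For example (illustrative): __sync_fetch_and_add on an 'int' object maps
    // to __sync_fetch_and_add_4 on typical targets, and that sized variant is
    // the declaration the lookup below is expected to find.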
5994 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 5995 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 5996 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 5997 assert(Res.getFoundDecl()); 5998 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 5999 if (!NewBuiltinDecl) 6000 return ExprError(); 6001 } 6002 6003 // The first argument --- the pointer --- has a fixed type; we 6004 // deduce the types of the rest of the arguments accordingly. Walk 6005 // the remaining arguments, converting them to the deduced value type. 6006 for (unsigned i = 0; i != NumFixed; ++i) { 6007 ExprResult Arg = TheCall->getArg(i+1); 6008 6009 // GCC does an implicit conversion to the pointer or integer ValType. This 6010 // can fail in some cases (1i -> int**), check for this error case now. 6011 // Initialize the argument. 6012 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6013 ValType, /*consume*/ false); 6014 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6015 if (Arg.isInvalid()) 6016 return ExprError(); 6017 6018 // Okay, we have something that *can* be converted to the right type. Check 6019 // to see if there is a potentially weird extension going on here. This can 6020 // happen when you do an atomic operation on something like an char* and 6021 // pass in 42. The 42 gets converted to char. This is even more strange 6022 // for things like 45.123 -> char, etc. 6023 // FIXME: Do this check. 6024 TheCall->setArg(i+1, Arg.get()); 6025 } 6026 6027 // Create a new DeclRefExpr to refer to the new decl. 6028 DeclRefExpr *NewDRE = DeclRefExpr::Create( 6029 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 6030 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 6031 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 6032 6033 // Set the callee in the CallExpr. 6034 // FIXME: This loses syntactic information. 6035 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 6036 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 6037 CK_BuiltinFnToFnPtr); 6038 TheCall->setCallee(PromotedCall.get()); 6039 6040 // Change the result type of the call to match the original value type. This 6041 // is arbitrary, but the codegen for these builtins ins design to handle it 6042 // gracefully. 6043 TheCall->setType(ResultType); 6044 6045 // Prohibit use of _ExtInt with atomic builtins. 6046 // The arguments would have already been converted to the first argument's 6047 // type, so only need to check the first argument. 6048 const auto *ExtIntValType = ValType->getAs<ExtIntType>(); 6049 if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) { 6050 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 6051 return ExprError(); 6052 } 6053 6054 return TheCallResult; 6055 } 6056 6057 /// SemaBuiltinNontemporalOverloaded - We have a call to 6058 /// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 6059 /// overloaded function based on the pointer type of its last argument. 6060 /// 6061 /// This function goes through and does final semantic checking for these 6062 /// builtins. 
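/// For example (illustrative): given 'float *fp',
/// __builtin_nontemporal_store(1.0f, fp) type-checks the value against 'float'
/// and returns void, while __builtin_nontemporal_load(fp) yields a 'float'.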
6063 ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 6064 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 6065 DeclRefExpr *DRE = 6066 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6067 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6068 unsigned BuiltinID = FDecl->getBuiltinID(); 6069 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 6070 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 6071 "Unexpected nontemporal load/store builtin!"); 6072 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 6073 unsigned numArgs = isStore ? 2 : 1; 6074 6075 // Ensure that we have the proper number of arguments. 6076 if (checkArgCount(*this, TheCall, numArgs)) 6077 return ExprError(); 6078 6079 // Inspect the last argument of the nontemporal builtin. This should always 6080 // be a pointer type, from which we imply the type of the memory access. 6081 // Because it is a pointer type, we don't have to worry about any implicit 6082 // casts here. 6083 Expr *PointerArg = TheCall->getArg(numArgs - 1); 6084 ExprResult PointerArgResult = 6085 DefaultFunctionArrayLvalueConversion(PointerArg); 6086 6087 if (PointerArgResult.isInvalid()) 6088 return ExprError(); 6089 PointerArg = PointerArgResult.get(); 6090 TheCall->setArg(numArgs - 1, PointerArg); 6091 6092 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 6093 if (!pointerType) { 6094 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 6095 << PointerArg->getType() << PointerArg->getSourceRange(); 6096 return ExprError(); 6097 } 6098 6099 QualType ValType = pointerType->getPointeeType(); 6100 6101 // Strip any qualifiers off ValType. 6102 ValType = ValType.getUnqualifiedType(); 6103 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6104 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 6105 !ValType->isVectorType()) { 6106 Diag(DRE->getBeginLoc(), 6107 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 6108 << PointerArg->getType() << PointerArg->getSourceRange(); 6109 return ExprError(); 6110 } 6111 6112 if (!isStore) { 6113 TheCall->setType(ValType); 6114 return TheCallResult; 6115 } 6116 6117 ExprResult ValArg = TheCall->getArg(0); 6118 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6119 Context, ValType, /*consume*/ false); 6120 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 6121 if (ValArg.isInvalid()) 6122 return ExprError(); 6123 6124 TheCall->setArg(0, ValArg.get()); 6125 TheCall->setType(Context.VoidTy); 6126 return TheCallResult; 6127 } 6128 6129 /// CheckObjCString - Checks that the argument to the builtin 6130 /// CFString constructor is correct 6131 /// Note: It might also make sense to do the UTF-16 conversion here (would 6132 /// simplify the backend). 
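/// For example (illustrative): CFSTR("hello") expands to
/// __builtin___CFStringMakeConstantString("hello"); the argument must be an
/// ordinary (non-wide) string literal, not a runtime expression.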
6133 bool Sema::CheckObjCString(Expr *Arg) { 6134 Arg = Arg->IgnoreParenCasts(); 6135 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 6136 6137 if (!Literal || !Literal->isAscii()) { 6138 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 6139 << Arg->getSourceRange(); 6140 return true; 6141 } 6142 6143 if (Literal->containsNonAsciiOrNull()) { 6144 StringRef String = Literal->getString(); 6145 unsigned NumBytes = String.size(); 6146 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 6147 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 6148 llvm::UTF16 *ToPtr = &ToBuf[0]; 6149 6150 llvm::ConversionResult Result = 6151 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 6152 ToPtr + NumBytes, llvm::strictConversion); 6153 // Check for conversion failure. 6154 if (Result != llvm::conversionOK) 6155 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 6156 << Arg->getSourceRange(); 6157 } 6158 return false; 6159 } 6160 6161 /// CheckObjCString - Checks that the format string argument to the os_log() 6162 /// and os_trace() functions is correct, and converts it to const char *. 6163 ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) { 6164 Arg = Arg->IgnoreParenCasts(); 6165 auto *Literal = dyn_cast<StringLiteral>(Arg); 6166 if (!Literal) { 6167 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) { 6168 Literal = ObjcLiteral->getString(); 6169 } 6170 } 6171 6172 if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) { 6173 return ExprError( 6174 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant) 6175 << Arg->getSourceRange()); 6176 } 6177 6178 ExprResult Result(Literal); 6179 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst()); 6180 InitializedEntity Entity = 6181 InitializedEntity::InitializeParameter(Context, ResultTy, false); 6182 Result = PerformCopyInitialization(Entity, SourceLocation(), Result); 6183 return Result; 6184 } 6185 6186 /// Check that the user is calling the appropriate va_start builtin for the 6187 /// target and calling convention. 6188 static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) { 6189 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple(); 6190 bool IsX64 = TT.getArch() == llvm::Triple::x86_64; 6191 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 || 6192 TT.getArch() == llvm::Triple::aarch64_32); 6193 bool IsWindows = TT.isOSWindows(); 6194 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start; 6195 if (IsX64 || IsAArch64) { 6196 CallingConv CC = CC_C; 6197 if (const FunctionDecl *FD = S.getCurFunctionDecl()) 6198 CC = FD->getType()->castAs<FunctionType>()->getCallConv(); 6199 if (IsMSVAStart) { 6200 // Don't allow this in System V ABI functions. 6201 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64)) 6202 return S.Diag(Fn->getBeginLoc(), 6203 diag::err_ms_va_start_used_in_sysv_function); 6204 } else { 6205 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions. 6206 // On x64 Windows, don't allow this in System V ABI functions. 6207 // (Yes, that means there's no corresponding way to support variadic 6208 // System V ABI functions on Windows.) 
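      // For example (illustrative): on x86-64 Linux, using va_start inside a
      // function declared __attribute__((ms_abi)) is rejected here; such a
      // function must use __builtin_ms_va_start instead.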
6209 if ((IsWindows && CC == CC_X86_64SysV) || 6210 (!IsWindows && CC == CC_Win64)) 6211 return S.Diag(Fn->getBeginLoc(), 6212 diag::err_va_start_used_in_wrong_abi_function) 6213 << !IsWindows; 6214 } 6215 return false; 6216 } 6217 6218 if (IsMSVAStart) 6219 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only); 6220 return false; 6221 } 6222 6223 static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, 6224 ParmVarDecl **LastParam = nullptr) { 6225 // Determine whether the current function, block, or obj-c method is variadic 6226 // and get its parameter list. 6227 bool IsVariadic = false; 6228 ArrayRef<ParmVarDecl *> Params; 6229 DeclContext *Caller = S.CurContext; 6230 if (auto *Block = dyn_cast<BlockDecl>(Caller)) { 6231 IsVariadic = Block->isVariadic(); 6232 Params = Block->parameters(); 6233 } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) { 6234 IsVariadic = FD->isVariadic(); 6235 Params = FD->parameters(); 6236 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) { 6237 IsVariadic = MD->isVariadic(); 6238 // FIXME: This isn't correct for methods (results in bogus warning). 6239 Params = MD->parameters(); 6240 } else if (isa<CapturedDecl>(Caller)) { 6241 // We don't support va_start in a CapturedDecl. 6242 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt); 6243 return true; 6244 } else { 6245 // This must be some other declcontext that parses exprs. 6246 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function); 6247 return true; 6248 } 6249 6250 if (!IsVariadic) { 6251 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function); 6252 return true; 6253 } 6254 6255 if (LastParam) 6256 *LastParam = Params.empty() ? nullptr : Params.back(); 6257 6258 return false; 6259 } 6260 6261 /// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start' 6262 /// for validity. Emit an error and return true on failure; return false 6263 /// on success. 6264 bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) { 6265 Expr *Fn = TheCall->getCallee(); 6266 6267 if (checkVAStartABI(*this, BuiltinID, Fn)) 6268 return true; 6269 6270 if (checkArgCount(*this, TheCall, 2)) 6271 return true; 6272 6273 // Type-check the first argument normally. 6274 if (checkBuiltinArgument(*this, TheCall, 0)) 6275 return true; 6276 6277 // Check that the current function is variadic, and get its last parameter. 6278 ParmVarDecl *LastParam; 6279 if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam)) 6280 return true; 6281 6282 // Verify that the second argument to the builtin is the last argument of the 6283 // current function or method. 6284 bool SecondArgIsLastNamedArgument = false; 6285 const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); 6286 6287 // These are valid if SecondArgIsLastNamedArgument is false after the next 6288 // block. 
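  // Illustrative example: in 'void f(int a, int b, ...)', 'va_start(ap, b)' is
  // accepted, while 'va_start(ap, a)' triggers the warning below because 'a'
  // is not the last named parameter.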
6289 QualType Type; 6290 SourceLocation ParamLoc; 6291 bool IsCRegister = false; 6292 6293 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { 6294 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { 6295 SecondArgIsLastNamedArgument = PV == LastParam; 6296 6297 Type = PV->getType(); 6298 ParamLoc = PV->getLocation(); 6299 IsCRegister = 6300 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus; 6301 } 6302 } 6303 6304 if (!SecondArgIsLastNamedArgument) 6305 Diag(TheCall->getArg(1)->getBeginLoc(), 6306 diag::warn_second_arg_of_va_start_not_last_named_param); 6307 else if (IsCRegister || Type->isReferenceType() || 6308 Type->isSpecificBuiltinType(BuiltinType::Float) || [=] { 6309 // Promotable integers are UB, but enumerations need a bit of 6310 // extra checking to see what their promotable type actually is. 6311 if (!Type->isPromotableIntegerType()) 6312 return false; 6313 if (!Type->isEnumeralType()) 6314 return true; 6315 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl(); 6316 return !(ED && 6317 Context.typesAreCompatible(ED->getPromotionType(), Type)); 6318 }()) { 6319 unsigned Reason = 0; 6320 if (Type->isReferenceType()) Reason = 1; 6321 else if (IsCRegister) Reason = 2; 6322 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; 6323 Diag(ParamLoc, diag::note_parameter_type) << Type; 6324 } 6325 6326 TheCall->setType(Context.VoidTy); 6327 return false; 6328 } 6329 6330 bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) { 6331 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, 6332 // const char *named_addr); 6333 6334 Expr *Func = Call->getCallee(); 6335 6336 if (Call->getNumArgs() < 3) 6337 return Diag(Call->getEndLoc(), 6338 diag::err_typecheck_call_too_few_args_at_least) 6339 << 0 /*function call*/ << 3 << Call->getNumArgs(); 6340 6341 // Type-check the first argument normally. 6342 if (checkBuiltinArgument(*this, Call, 0)) 6343 return true; 6344 6345 // Check that the current function is variadic. 6346 if (checkVAStartIsInVariadicFunction(*this, Func)) 6347 return true; 6348 6349 // __va_start on Windows does not validate the parameter qualifiers 6350 6351 const Expr *Arg1 = Call->getArg(1)->IgnoreParens(); 6352 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr(); 6353 6354 const Expr *Arg2 = Call->getArg(2)->IgnoreParens(); 6355 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr(); 6356 6357 const QualType &ConstCharPtrTy = 6358 Context.getPointerType(Context.CharTy.withConst()); 6359 if (!Arg1Ty->isPointerType() || 6360 Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy) 6361 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6362 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */ 6363 << 0 /* qualifier difference */ 6364 << 3 /* parameter mismatch */ 6365 << 2 << Arg1->getType() << ConstCharPtrTy; 6366 6367 const QualType SizeTy = Context.getSizeType(); 6368 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) 6369 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) 6370 << Arg2->getType() << SizeTy << 1 /* different class */ 6371 << 0 /* qualifier difference */ 6372 << 3 /* parameter mismatch */ 6373 << 3 << Arg2->getType() << SizeTy; 6374 6375 return false; 6376 } 6377 6378 /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and 6379 /// friends. This is declared to take (...), so we have to check everything. 
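/// For example (illustrative): __builtin_isless(1.0f, 2.0) promotes both
/// operands to double and is accepted, whereas __builtin_isless(1, 2) is
/// rejected because the common type after promotion is not a real floating
/// type.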
6380 bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) { 6381 if (checkArgCount(*this, TheCall, 2)) 6382 return true; 6383 6384 ExprResult OrigArg0 = TheCall->getArg(0); 6385 ExprResult OrigArg1 = TheCall->getArg(1); 6386 6387 // Do standard promotions between the two arguments, returning their common 6388 // type. 6389 QualType Res = UsualArithmeticConversions( 6390 OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison); 6391 if (OrigArg0.isInvalid() || OrigArg1.isInvalid()) 6392 return true; 6393 6394 // Make sure any conversions are pushed back into the call; this is 6395 // type safe since unordered compare builtins are declared as "_Bool 6396 // foo(...)". 6397 TheCall->setArg(0, OrigArg0.get()); 6398 TheCall->setArg(1, OrigArg1.get()); 6399 6400 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent()) 6401 return false; 6402 6403 // If the common type isn't a real floating type, then the arguments were 6404 // invalid for this operation. 6405 if (Res.isNull() || !Res->isRealFloatingType()) 6406 return Diag(OrigArg0.get()->getBeginLoc(), 6407 diag::err_typecheck_call_invalid_ordered_compare) 6408 << OrigArg0.get()->getType() << OrigArg1.get()->getType() 6409 << SourceRange(OrigArg0.get()->getBeginLoc(), 6410 OrigArg1.get()->getEndLoc()); 6411 6412 return false; 6413 } 6414 6415 /// SemaBuiltinSemaBuiltinFPClassification - Handle functions like 6416 /// __builtin_isnan and friends. This is declared to take (...), so we have 6417 /// to check everything. We expect the last argument to be a floating point 6418 /// value. 6419 bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) { 6420 if (checkArgCount(*this, TheCall, NumArgs)) 6421 return true; 6422 6423 // __builtin_fpclassify is the only case where NumArgs != 1, so we can count 6424 // on all preceding parameters just being int. Try all of those. 6425 for (unsigned i = 0; i < NumArgs - 1; ++i) { 6426 Expr *Arg = TheCall->getArg(i); 6427 6428 if (Arg->isTypeDependent()) 6429 return false; 6430 6431 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing); 6432 6433 if (Res.isInvalid()) 6434 return true; 6435 TheCall->setArg(i, Res.get()); 6436 } 6437 6438 Expr *OrigArg = TheCall->getArg(NumArgs-1); 6439 6440 if (OrigArg->isTypeDependent()) 6441 return false; 6442 6443 // Usual Unary Conversions will convert half to float, which we want for 6444 // machines that use fp16 conversion intrinsics. Else, we wnat to leave the 6445 // type how it is, but do normal L->Rvalue conversions. 6446 if (Context.getTargetInfo().useFP16ConversionIntrinsics()) 6447 OrigArg = UsualUnaryConversions(OrigArg).get(); 6448 else 6449 OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get(); 6450 TheCall->setArg(NumArgs - 1, OrigArg); 6451 6452 // This operation requires a non-_Complex floating-point number. 6453 if (!OrigArg->getType()->isRealFloatingType()) 6454 return Diag(OrigArg->getBeginLoc(), 6455 diag::err_typecheck_call_invalid_unary_fp) 6456 << OrigArg->getType() << OrigArg->getSourceRange(); 6457 6458 return false; 6459 } 6460 6461 /// Perform semantic analysis for a call to __builtin_complex. 
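/// For example (illustrative): __builtin_complex(1.0, 2.0) has type
/// _Complex double; mixing a float and a double operand is rejected, as are
/// _Float16 and __fp16 operands.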
6462 bool Sema::SemaBuiltinComplex(CallExpr *TheCall) { 6463 if (checkArgCount(*this, TheCall, 2)) 6464 return true; 6465 6466 bool Dependent = false; 6467 for (unsigned I = 0; I != 2; ++I) { 6468 Expr *Arg = TheCall->getArg(I); 6469 QualType T = Arg->getType(); 6470 if (T->isDependentType()) { 6471 Dependent = true; 6472 continue; 6473 } 6474 6475 // Despite supporting _Complex int, GCC requires a real floating point type 6476 // for the operands of __builtin_complex. 6477 if (!T->isRealFloatingType()) { 6478 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp) 6479 << Arg->getType() << Arg->getSourceRange(); 6480 } 6481 6482 ExprResult Converted = DefaultLvalueConversion(Arg); 6483 if (Converted.isInvalid()) 6484 return true; 6485 TheCall->setArg(I, Converted.get()); 6486 } 6487 6488 if (Dependent) { 6489 TheCall->setType(Context.DependentTy); 6490 return false; 6491 } 6492 6493 Expr *Real = TheCall->getArg(0); 6494 Expr *Imag = TheCall->getArg(1); 6495 if (!Context.hasSameType(Real->getType(), Imag->getType())) { 6496 return Diag(Real->getBeginLoc(), 6497 diag::err_typecheck_call_different_arg_types) 6498 << Real->getType() << Imag->getType() 6499 << Real->getSourceRange() << Imag->getSourceRange(); 6500 } 6501 6502 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; 6503 // don't allow this builtin to form those types either. 6504 // FIXME: Should we allow these types? 6505 if (Real->getType()->isFloat16Type()) 6506 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6507 << "_Float16"; 6508 if (Real->getType()->isHalfType()) 6509 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec) 6510 << "half"; 6511 6512 TheCall->setType(Context.getComplexType(Real->getType())); 6513 return false; 6514 } 6515 6516 // Customized Sema Checking for VSX builtins that have the following signature: 6517 // vector [...] builtinName(vector [...], vector [...], const int); 6518 // Which takes the same type of vectors (any legal vector type) for the first 6519 // two arguments and takes compile time constant for the third argument. 6520 // Example builtins are : 6521 // vector double vec_xxpermdi(vector double, vector double, int); 6522 // vector short vec_xxsldwi(vector short, vector short, int); 6523 bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 6524 unsigned ExpectedNumArgs = 3; 6525 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 6526 return true; 6527 6528 // Check the third argument is a compile time constant 6529 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 6530 return Diag(TheCall->getBeginLoc(), 6531 diag::err_vsx_builtin_nonconstant_argument) 6532 << 3 /* argument index */ << TheCall->getDirectCallee() 6533 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 6534 TheCall->getArg(2)->getEndLoc()); 6535 6536 QualType Arg1Ty = TheCall->getArg(0)->getType(); 6537 QualType Arg2Ty = TheCall->getArg(1)->getType(); 6538 6539 // Check the type of argument 1 and argument 2 are vectors. 6540 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 6541 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 6542 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 6543 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 6544 << TheCall->getDirectCallee() 6545 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6546 TheCall->getArg(1)->getEndLoc()); 6547 } 6548 6549 // Check the first two arguments are the same type. 
6550 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 6551 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 6552 << TheCall->getDirectCallee() 6553 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6554 TheCall->getArg(1)->getEndLoc()); 6555 } 6556 6557 // When default clang type checking is turned off and the customized type 6558 // checking is used, the returning type of the function must be explicitly 6559 // set. Otherwise it is _Bool by default. 6560 TheCall->setType(Arg1Ty); 6561 6562 return false; 6563 } 6564 6565 /// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 6566 // This is declared to take (...), so we have to check everything. 6567 ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { 6568 if (TheCall->getNumArgs() < 2) 6569 return ExprError(Diag(TheCall->getEndLoc(), 6570 diag::err_typecheck_call_too_few_args_at_least) 6571 << 0 /*function call*/ << 2 << TheCall->getNumArgs() 6572 << TheCall->getSourceRange()); 6573 6574 // Determine which of the following types of shufflevector we're checking: 6575 // 1) unary, vector mask: (lhs, mask) 6576 // 2) binary, scalar mask: (lhs, rhs, index, ..., index) 6577 QualType resType = TheCall->getArg(0)->getType(); 6578 unsigned numElements = 0; 6579 6580 if (!TheCall->getArg(0)->isTypeDependent() && 6581 !TheCall->getArg(1)->isTypeDependent()) { 6582 QualType LHSType = TheCall->getArg(0)->getType(); 6583 QualType RHSType = TheCall->getArg(1)->getType(); 6584 6585 if (!LHSType->isVectorType() || !RHSType->isVectorType()) 6586 return ExprError( 6587 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector) 6588 << TheCall->getDirectCallee() 6589 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6590 TheCall->getArg(1)->getEndLoc())); 6591 6592 numElements = LHSType->castAs<VectorType>()->getNumElements(); 6593 unsigned numResElements = TheCall->getNumArgs() - 2; 6594 6595 // Check to see if we have a call with 2 vector arguments, the unary shuffle 6596 // with mask. If so, verify that RHS is an integer vector type with the 6597 // same number of elts as lhs. 6598 if (TheCall->getNumArgs() == 2) { 6599 if (!RHSType->hasIntegerRepresentation() || 6600 RHSType->castAs<VectorType>()->getNumElements() != numElements) 6601 return ExprError(Diag(TheCall->getBeginLoc(), 6602 diag::err_vec_builtin_incompatible_vector) 6603 << TheCall->getDirectCallee() 6604 << SourceRange(TheCall->getArg(1)->getBeginLoc(), 6605 TheCall->getArg(1)->getEndLoc())); 6606 } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { 6607 return ExprError(Diag(TheCall->getBeginLoc(), 6608 diag::err_vec_builtin_incompatible_vector) 6609 << TheCall->getDirectCallee() 6610 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 6611 TheCall->getArg(1)->getEndLoc())); 6612 } else if (numElements != numResElements) { 6613 QualType eltType = LHSType->castAs<VectorType>()->getElementType(); 6614 resType = Context.getVectorType(eltType, numResElements, 6615 VectorType::GenericVector); 6616 } 6617 } 6618 6619 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { 6620 if (TheCall->getArg(i)->isTypeDependent() || 6621 TheCall->getArg(i)->isValueDependent()) 6622 continue; 6623 6624 Optional<llvm::APSInt> Result; 6625 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context))) 6626 return ExprError(Diag(TheCall->getBeginLoc(), 6627 diag::err_shufflevector_nonconstant_argument) 6628 << TheCall->getArg(i)->getSourceRange()); 6629 6630 // Allow -1 which will be translated to undef in the IR. 
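    // For example (illustrative): __builtin_shufflevector(a, b, 0, -1) keeps
    // element 0 and leaves the second result element undefined.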
6631 if (Result->isSigned() && Result->isAllOnesValue()) 6632 continue; 6633 6634 if (Result->getActiveBits() > 64 || 6635 Result->getZExtValue() >= numElements * 2) 6636 return ExprError(Diag(TheCall->getBeginLoc(), 6637 diag::err_shufflevector_argument_too_large) 6638 << TheCall->getArg(i)->getSourceRange()); 6639 } 6640 6641 SmallVector<Expr*, 32> exprs; 6642 6643 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { 6644 exprs.push_back(TheCall->getArg(i)); 6645 TheCall->setArg(i, nullptr); 6646 } 6647 6648 return new (Context) ShuffleVectorExpr(Context, exprs, resType, 6649 TheCall->getCallee()->getBeginLoc(), 6650 TheCall->getRParenLoc()); 6651 } 6652 6653 /// SemaConvertVectorExpr - Handle __builtin_convertvector 6654 ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, 6655 SourceLocation BuiltinLoc, 6656 SourceLocation RParenLoc) { 6657 ExprValueKind VK = VK_PRValue; 6658 ExprObjectKind OK = OK_Ordinary; 6659 QualType DstTy = TInfo->getType(); 6660 QualType SrcTy = E->getType(); 6661 6662 if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) 6663 return ExprError(Diag(BuiltinLoc, 6664 diag::err_convertvector_non_vector) 6665 << E->getSourceRange()); 6666 if (!DstTy->isVectorType() && !DstTy->isDependentType()) 6667 return ExprError(Diag(BuiltinLoc, 6668 diag::err_convertvector_non_vector_type)); 6669 6670 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { 6671 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements(); 6672 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements(); 6673 if (SrcElts != DstElts) 6674 return ExprError(Diag(BuiltinLoc, 6675 diag::err_convertvector_incompatible_vector) 6676 << E->getSourceRange()); 6677 } 6678 6679 return new (Context) 6680 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); 6681 } 6682 6683 /// SemaBuiltinPrefetch - Handle __builtin_prefetch. 6684 // This is declared to take (const void*, ...) and can take two 6685 // optional constant int args. 6686 bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 6687 unsigned NumArgs = TheCall->getNumArgs(); 6688 6689 if (NumArgs > 3) 6690 return Diag(TheCall->getEndLoc(), 6691 diag::err_typecheck_call_too_many_args_at_most) 6692 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6693 6694 // Argument 0 is checked for us and the remaining arguments must be 6695 // constant integers. 6696 for (unsigned i = 1; i != NumArgs; ++i) 6697 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 6698 return true; 6699 6700 return false; 6701 } 6702 6703 /// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 6704 bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 6705 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 6706 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 6707 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 6708 if (checkArgCount(*this, TheCall, 1)) 6709 return true; 6710 Expr *Arg = TheCall->getArg(0); 6711 if (Arg->isInstantiationDependent()) 6712 return false; 6713 6714 QualType ArgTy = Arg->getType(); 6715 if (!ArgTy->hasFloatingRepresentation()) 6716 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 6717 << ArgTy; 6718 if (Arg->isLValue()) { 6719 ExprResult FirstArg = DefaultLvalueConversion(Arg); 6720 TheCall->setArg(0, FirstArg.get()); 6721 } 6722 TheCall->setType(TheCall->getArg(0)->getType()); 6723 return false; 6724 } 6725 6726 /// SemaBuiltinAssume - Handle __assume (MS Extension). 
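/// For example (illustrative): '__assume(idx < limit)' is accepted silently,
/// while '__assume(f() != 0)' draws the side-effect warning below because the
/// call is never evaluated.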
6727 // __assume does not evaluate its arguments, and should warn if its argument 6728 // has side effects. 6729 bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 6730 Expr *Arg = TheCall->getArg(0); 6731 if (Arg->isInstantiationDependent()) return false; 6732 6733 if (Arg->HasSideEffects(Context)) 6734 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 6735 << Arg->getSourceRange() 6736 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 6737 6738 return false; 6739 } 6740 6741 /// Handle __builtin_alloca_with_align. This is declared 6742 /// as (size_t, size_t) where the second size_t must be a power of 2 greater 6743 /// than 8. 6744 bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 6745 // The alignment must be a constant integer. 6746 Expr *Arg = TheCall->getArg(1); 6747 6748 // We can't check the value of a dependent argument. 6749 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6750 if (const auto *UE = 6751 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 6752 if (UE->getKind() == UETT_AlignOf || 6753 UE->getKind() == UETT_PreferredAlignOf) 6754 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 6755 << Arg->getSourceRange(); 6756 6757 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 6758 6759 if (!Result.isPowerOf2()) 6760 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6761 << Arg->getSourceRange(); 6762 6763 if (Result < Context.getCharWidth()) 6764 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 6765 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 6766 6767 if (Result > std::numeric_limits<int32_t>::max()) 6768 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 6769 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 6770 } 6771 6772 return false; 6773 } 6774 6775 /// Handle __builtin_assume_aligned. This is declared 6776 /// as (const void*, size_t, ...) and can take one optional constant int arg. 6777 bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { 6778 unsigned NumArgs = TheCall->getNumArgs(); 6779 6780 if (NumArgs > 3) 6781 return Diag(TheCall->getEndLoc(), 6782 diag::err_typecheck_call_too_many_args_at_most) 6783 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 6784 6785 // The alignment must be a constant integer. 6786 Expr *Arg = TheCall->getArg(1); 6787 6788 // We can't check the value of a dependent argument. 
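  // Illustrative examples: __builtin_assume_aligned(p, 32) passes,
  // __builtin_assume_aligned(p, 24) errors because 24 is not a power of two,
  // and __builtin_assume_aligned(p, 32, off) also accepts a runtime offset.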
6789 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 6790 llvm::APSInt Result; 6791 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 6792 return true; 6793 6794 if (!Result.isPowerOf2()) 6795 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 6796 << Arg->getSourceRange(); 6797 6798 if (Result > Sema::MaximumAlignment) 6799 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great) 6800 << Arg->getSourceRange() << Sema::MaximumAlignment; 6801 } 6802 6803 if (NumArgs > 2) { 6804 ExprResult Arg(TheCall->getArg(2)); 6805 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 6806 Context.getSizeType(), false); 6807 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6808 if (Arg.isInvalid()) return true; 6809 TheCall->setArg(2, Arg.get()); 6810 } 6811 6812 return false; 6813 } 6814 6815 bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) { 6816 unsigned BuiltinID = 6817 cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID(); 6818 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size; 6819 6820 unsigned NumArgs = TheCall->getNumArgs(); 6821 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2; 6822 if (NumArgs < NumRequiredArgs) { 6823 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args) 6824 << 0 /* function call */ << NumRequiredArgs << NumArgs 6825 << TheCall->getSourceRange(); 6826 } 6827 if (NumArgs >= NumRequiredArgs + 0x100) { 6828 return Diag(TheCall->getEndLoc(), 6829 diag::err_typecheck_call_too_many_args_at_most) 6830 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs 6831 << TheCall->getSourceRange(); 6832 } 6833 unsigned i = 0; 6834 6835 // For formatting call, check buffer arg. 6836 if (!IsSizeCall) { 6837 ExprResult Arg(TheCall->getArg(i)); 6838 InitializedEntity Entity = InitializedEntity::InitializeParameter( 6839 Context, Context.VoidPtrTy, false); 6840 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 6841 if (Arg.isInvalid()) 6842 return true; 6843 TheCall->setArg(i, Arg.get()); 6844 i++; 6845 } 6846 6847 // Check string literal arg. 6848 unsigned FormatIdx = i; 6849 { 6850 ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i)); 6851 if (Arg.isInvalid()) 6852 return true; 6853 TheCall->setArg(i, Arg.get()); 6854 i++; 6855 } 6856 6857 // Make sure variadic args are scalar. 6858 unsigned FirstDataArg = i; 6859 while (i < NumArgs) { 6860 ExprResult Arg = DefaultVariadicArgumentPromotion( 6861 TheCall->getArg(i), VariadicFunction, nullptr); 6862 if (Arg.isInvalid()) 6863 return true; 6864 CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType()); 6865 if (ArgSize.getQuantity() >= 0x100) { 6866 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big) 6867 << i << (int)ArgSize.getQuantity() << 0xff 6868 << TheCall->getSourceRange(); 6869 } 6870 TheCall->setArg(i, Arg.get()); 6871 i++; 6872 } 6873 6874 // Check formatting specifiers. NOTE: We're only doing this for the non-size 6875 // call to avoid duplicate diagnostics. 
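  // (Illustrative: __builtin_os_log_format(buf, "%d", x) and the matching
  // __builtin_os_log_format_buffer_size("%d", x) share the same format string,
  // so checking it for both calls would report each problem twice.)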
6876 if (!IsSizeCall) { 6877 llvm::SmallBitVector CheckedVarArgs(NumArgs, false); 6878 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs()); 6879 bool Success = CheckFormatArguments( 6880 Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog, 6881 VariadicFunction, TheCall->getBeginLoc(), SourceRange(), 6882 CheckedVarArgs); 6883 if (!Success) 6884 return true; 6885 } 6886 6887 if (IsSizeCall) { 6888 TheCall->setType(Context.getSizeType()); 6889 } else { 6890 TheCall->setType(Context.VoidPtrTy); 6891 } 6892 return false; 6893 } 6894 6895 /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr 6896 /// TheCall is a constant expression. 6897 bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, 6898 llvm::APSInt &Result) { 6899 Expr *Arg = TheCall->getArg(ArgNum); 6900 DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 6901 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6902 6903 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; 6904 6905 Optional<llvm::APSInt> R; 6906 if (!(R = Arg->getIntegerConstantExpr(Context))) 6907 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type) 6908 << FDecl->getDeclName() << Arg->getSourceRange(); 6909 Result = *R; 6910 return false; 6911 } 6912 6913 /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr 6914 /// TheCall is a constant expression in the range [Low, High]. 6915 bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, 6916 int Low, int High, bool RangeIsError) { 6917 if (isConstantEvaluated()) 6918 return false; 6919 llvm::APSInt Result; 6920 6921 // We can't check the value of a dependent argument. 6922 Expr *Arg = TheCall->getArg(ArgNum); 6923 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6924 return false; 6925 6926 // Check constant-ness first. 6927 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6928 return true; 6929 6930 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) { 6931 if (RangeIsError) 6932 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range) 6933 << toString(Result, 10) << Low << High << Arg->getSourceRange(); 6934 else 6935 // Defer the warning until we know if the code will be emitted so that 6936 // dead code can ignore this. 6937 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 6938 PDiag(diag::warn_argument_invalid_range) 6939 << toString(Result, 10) << Low << High 6940 << Arg->getSourceRange()); 6941 } 6942 6943 return false; 6944 } 6945 6946 /// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr 6947 /// TheCall is a constant expression is a multiple of Num.. 6948 bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, 6949 unsigned Num) { 6950 llvm::APSInt Result; 6951 6952 // We can't check the value of a dependent argument. 6953 Expr *Arg = TheCall->getArg(ArgNum); 6954 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6955 return false; 6956 6957 // Check constant-ness first. 6958 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6959 return true; 6960 6961 if (Result.getSExtValue() % Num != 0) 6962 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple) 6963 << Num << Arg->getSourceRange(); 6964 6965 return false; 6966 } 6967 6968 /// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a 6969 /// constant expression representing a power of 2. 
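/// For example (illustrative): 1, 2, and 4096 are accepted, while 0, 3, and
/// negative values are rejected.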
6970 bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) { 6971 llvm::APSInt Result; 6972 6973 // We can't check the value of a dependent argument. 6974 Expr *Arg = TheCall->getArg(ArgNum); 6975 if (Arg->isTypeDependent() || Arg->isValueDependent()) 6976 return false; 6977 6978 // Check constant-ness first. 6979 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 6980 return true; 6981 6982 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if 6983 // and only if x is a power of 2. 6984 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0) 6985 return false; 6986 6987 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2) 6988 << Arg->getSourceRange(); 6989 } 6990 6991 static bool IsShiftedByte(llvm::APSInt Value) { 6992 if (Value.isNegative()) 6993 return false; 6994 6995 // Check if it's a shifted byte, by shifting it down 6996 while (true) { 6997 // If the value fits in the bottom byte, the check passes. 6998 if (Value < 0x100) 6999 return true; 7000 7001 // Otherwise, if the value has _any_ bits in the bottom byte, the check 7002 // fails. 7003 if ((Value & 0xFF) != 0) 7004 return false; 7005 7006 // If the bottom 8 bits are all 0, but something above that is nonzero, 7007 // then shifting the value right by 8 bits won't affect whether it's a 7008 // shifted byte or not. So do that, and go round again. 7009 Value >>= 8; 7010 } 7011 } 7012 7013 /// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is 7014 /// a constant expression representing an arbitrary byte value shifted left by 7015 /// a multiple of 8 bits. 7016 bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, 7017 unsigned ArgBits) { 7018 llvm::APSInt Result; 7019 7020 // We can't check the value of a dependent argument. 7021 Expr *Arg = TheCall->getArg(ArgNum); 7022 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7023 return false; 7024 7025 // Check constant-ness first. 7026 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7027 return true; 7028 7029 // Truncate to the given size. 7030 Result = Result.getLoBits(ArgBits); 7031 Result.setIsUnsigned(true); 7032 7033 if (IsShiftedByte(Result)) 7034 return false; 7035 7036 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte) 7037 << Arg->getSourceRange(); 7038 } 7039 7040 /// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of 7041 /// TheCall is a constant expression representing either a shifted byte value, 7042 /// or a value of the form 0x??FF (i.e. a member of the arithmetic progression 7043 /// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some 7044 /// Arm MVE intrinsics. 7045 bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, 7046 int ArgNum, 7047 unsigned ArgBits) { 7048 llvm::APSInt Result; 7049 7050 // We can't check the value of a dependent argument. 7051 Expr *Arg = TheCall->getArg(ArgNum); 7052 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7053 return false; 7054 7055 // Check constant-ness first. 7056 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 7057 return true; 7058 7059 // Truncate to the given size. 7060 Result = Result.getLoBits(ArgBits); 7061 Result.setIsUnsigned(true); 7062 7063 // Check to see if it's in either of the required forms. 
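  // For example (illustrative): 0x00AB and 0x3400 pass as shifted bytes,
  // 0x12FF passes via the 0x??FF form, and 0x1234 is rejected.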
7064 if (IsShiftedByte(Result) || 7065 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF)) 7066 return false; 7067 7068 return Diag(TheCall->getBeginLoc(), 7069 diag::err_argument_not_shifted_byte_or_xxff) 7070 << Arg->getSourceRange(); 7071 } 7072 7073 /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions 7074 bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { 7075 if (BuiltinID == AArch64::BI__builtin_arm_irg) { 7076 if (checkArgCount(*this, TheCall, 2)) 7077 return true; 7078 Expr *Arg0 = TheCall->getArg(0); 7079 Expr *Arg1 = TheCall->getArg(1); 7080 7081 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7082 if (FirstArg.isInvalid()) 7083 return true; 7084 QualType FirstArgType = FirstArg.get()->getType(); 7085 if (!FirstArgType->isAnyPointerType()) 7086 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7087 << "first" << FirstArgType << Arg0->getSourceRange(); 7088 TheCall->setArg(0, FirstArg.get()); 7089 7090 ExprResult SecArg = DefaultLvalueConversion(Arg1); 7091 if (SecArg.isInvalid()) 7092 return true; 7093 QualType SecArgType = SecArg.get()->getType(); 7094 if (!SecArgType->isIntegerType()) 7095 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7096 << "second" << SecArgType << Arg1->getSourceRange(); 7097 7098 // Derive the return type from the pointer argument. 7099 TheCall->setType(FirstArgType); 7100 return false; 7101 } 7102 7103 if (BuiltinID == AArch64::BI__builtin_arm_addg) { 7104 if (checkArgCount(*this, TheCall, 2)) 7105 return true; 7106 7107 Expr *Arg0 = TheCall->getArg(0); 7108 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7109 if (FirstArg.isInvalid()) 7110 return true; 7111 QualType FirstArgType = FirstArg.get()->getType(); 7112 if (!FirstArgType->isAnyPointerType()) 7113 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7114 << "first" << FirstArgType << Arg0->getSourceRange(); 7115 TheCall->setArg(0, FirstArg.get()); 7116 7117 // Derive the return type from the pointer argument. 
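    // (Illustrative: for 'int *p', __builtin_arm_addg(p, 7) keeps the result
    // type 'int *'.)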
7118 TheCall->setType(FirstArgType); 7119 7120 // Second arg must be an constant in range [0,15] 7121 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7122 } 7123 7124 if (BuiltinID == AArch64::BI__builtin_arm_gmi) { 7125 if (checkArgCount(*this, TheCall, 2)) 7126 return true; 7127 Expr *Arg0 = TheCall->getArg(0); 7128 Expr *Arg1 = TheCall->getArg(1); 7129 7130 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7131 if (FirstArg.isInvalid()) 7132 return true; 7133 QualType FirstArgType = FirstArg.get()->getType(); 7134 if (!FirstArgType->isAnyPointerType()) 7135 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7136 << "first" << FirstArgType << Arg0->getSourceRange(); 7137 7138 QualType SecArgType = Arg1->getType(); 7139 if (!SecArgType->isIntegerType()) 7140 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) 7141 << "second" << SecArgType << Arg1->getSourceRange(); 7142 TheCall->setType(Context.IntTy); 7143 return false; 7144 } 7145 7146 if (BuiltinID == AArch64::BI__builtin_arm_ldg || 7147 BuiltinID == AArch64::BI__builtin_arm_stg) { 7148 if (checkArgCount(*this, TheCall, 1)) 7149 return true; 7150 Expr *Arg0 = TheCall->getArg(0); 7151 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0); 7152 if (FirstArg.isInvalid()) 7153 return true; 7154 7155 QualType FirstArgType = FirstArg.get()->getType(); 7156 if (!FirstArgType->isAnyPointerType()) 7157 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) 7158 << "first" << FirstArgType << Arg0->getSourceRange(); 7159 TheCall->setArg(0, FirstArg.get()); 7160 7161 // Derive the return type from the pointer argument. 7162 if (BuiltinID == AArch64::BI__builtin_arm_ldg) 7163 TheCall->setType(FirstArgType); 7164 return false; 7165 } 7166 7167 if (BuiltinID == AArch64::BI__builtin_arm_subp) { 7168 Expr *ArgA = TheCall->getArg(0); 7169 Expr *ArgB = TheCall->getArg(1); 7170 7171 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA); 7172 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB); 7173 7174 if (ArgExprA.isInvalid() || ArgExprB.isInvalid()) 7175 return true; 7176 7177 QualType ArgTypeA = ArgExprA.get()->getType(); 7178 QualType ArgTypeB = ArgExprB.get()->getType(); 7179 7180 auto isNull = [&] (Expr *E) -> bool { 7181 return E->isNullPointerConstant( 7182 Context, Expr::NPC_ValueDependentIsNotNull); }; 7183 7184 // argument should be either a pointer or null 7185 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) 7186 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7187 << "first" << ArgTypeA << ArgA->getSourceRange(); 7188 7189 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) 7190 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) 7191 << "second" << ArgTypeB << ArgB->getSourceRange(); 7192 7193 // Ensure Pointee types are compatible 7194 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && 7195 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) { 7196 QualType pointeeA = ArgTypeA->getPointeeType(); 7197 QualType pointeeB = ArgTypeB->getPointeeType(); 7198 if (!Context.typesAreCompatible( 7199 Context.getCanonicalType(pointeeA).getUnqualifiedType(), 7200 Context.getCanonicalType(pointeeB).getUnqualifiedType())) { 7201 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) 7202 << ArgTypeA << ArgTypeB << ArgA->getSourceRange() 7203 << ArgB->getSourceRange(); 7204 } 7205 } 7206 7207 // at least one argument should be pointer type 7208 if 
(!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) 7209 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) 7210 << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); 7211 7212 if (isNull(ArgA)) // adopt type of the other pointer 7213 ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); 7214 7215 if (isNull(ArgB)) 7216 ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer); 7217 7218 TheCall->setArg(0, ArgExprA.get()); 7219 TheCall->setArg(1, ArgExprB.get()); 7220 TheCall->setType(Context.LongLongTy); 7221 return false; 7222 } 7223 assert(false && "Unhandled ARM MTE intrinsic"); 7224 return true; 7225 } 7226 7227 /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr 7228 /// TheCall is an ARM/AArch64 special register string literal. 7229 bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, 7230 int ArgNum, unsigned ExpectedFieldNum, 7231 bool AllowName) { 7232 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || 7233 BuiltinID == ARM::BI__builtin_arm_wsr64 || 7234 BuiltinID == ARM::BI__builtin_arm_rsr || 7235 BuiltinID == ARM::BI__builtin_arm_rsrp || 7236 BuiltinID == ARM::BI__builtin_arm_wsr || 7237 BuiltinID == ARM::BI__builtin_arm_wsrp; 7238 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || 7239 BuiltinID == AArch64::BI__builtin_arm_wsr64 || 7240 BuiltinID == AArch64::BI__builtin_arm_rsr || 7241 BuiltinID == AArch64::BI__builtin_arm_rsrp || 7242 BuiltinID == AArch64::BI__builtin_arm_wsr || 7243 BuiltinID == AArch64::BI__builtin_arm_wsrp; 7244 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); 7245 7246 // We can't check the value of a dependent argument. 7247 Expr *Arg = TheCall->getArg(ArgNum); 7248 if (Arg->isTypeDependent() || Arg->isValueDependent()) 7249 return false; 7250 7251 // Check if the argument is a string literal. 7252 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 7253 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 7254 << Arg->getSourceRange(); 7255 7256 // Check the type of special register given. 7257 StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 7258 SmallVector<StringRef, 6> Fields; 7259 Reg.split(Fields, ":"); 7260 7261 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) 7262 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7263 << Arg->getSourceRange(); 7264 7265 // If the string is the name of a register then we cannot check that it is 7266 // valid here but if the string is of one the forms described in ACLE then we 7267 // can check that the supplied fields are integers and within the valid 7268 // ranges. 7269 if (Fields.size() > 1) { 7270 bool FiveFields = Fields.size() == 5; 7271 7272 bool ValidString = true; 7273 if (IsARMBuiltin) { 7274 ValidString &= Fields[0].startswith_insensitive("cp") || 7275 Fields[0].startswith_insensitive("p"); 7276 if (ValidString) 7277 Fields[0] = Fields[0].drop_front( 7278 Fields[0].startswith_insensitive("cp") ? 2 : 1); 7279 7280 ValidString &= Fields[2].startswith_insensitive("c"); 7281 if (ValidString) 7282 Fields[2] = Fields[2].drop_front(1); 7283 7284 if (FiveFields) { 7285 ValidString &= Fields[3].startswith_insensitive("c"); 7286 if (ValidString) 7287 Fields[3] = Fields[3].drop_front(1); 7288 } 7289 } 7290 7291 SmallVector<int, 5> Ranges; 7292 if (FiveFields) 7293 Ranges.append({IsAArch64Builtin ? 
1 : 15, 7, 15, 15, 7}); 7294 else 7295 Ranges.append({15, 7, 15}); 7296 7297 for (unsigned i=0; i<Fields.size(); ++i) { 7298 int IntField; 7299 ValidString &= !Fields[i].getAsInteger(10, IntField); 7300 ValidString &= (IntField >= 0 && IntField <= Ranges[i]); 7301 } 7302 7303 if (!ValidString) 7304 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg) 7305 << Arg->getSourceRange(); 7306 } else if (IsAArch64Builtin && Fields.size() == 1) { 7307 // If the register name is one of those that appear in the condition below 7308 // and the special register builtin being used is one of the write builtins, 7309 // then we require that the argument provided for writing to the register 7310 // is an integer constant expression. This is because it will be lowered to 7311 // an MSR (immediate) instruction, so we need to know the immediate at 7312 // compile time. 7313 if (TheCall->getNumArgs() != 2) 7314 return false; 7315 7316 std::string RegLower = Reg.lower(); 7317 if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && 7318 RegLower != "pan" && RegLower != "uao") 7319 return false; 7320 7321 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 7322 } 7323 7324 return false; 7325 } 7326 7327 /// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity. 7328 /// Emit an error and return true on failure; return false on success. 7329 /// TypeStr is a string containing the type descriptor of the value returned by 7330 /// the builtin and the descriptors of the expected type of the arguments. 7331 bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) { 7332 7333 assert((TypeStr[0] != '\0') && 7334 "Invalid types in PPC MMA builtin declaration"); 7335 7336 unsigned Mask = 0; 7337 unsigned ArgNum = 0; 7338 7339 // The first type in TypeStr is the type of the value returned by the 7340 // builtin. So we first read that type and change the type of TheCall. 7341 QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7342 TheCall->setType(type); 7343 7344 while (*TypeStr != '\0') { 7345 Mask = 0; 7346 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7347 if (ArgNum >= TheCall->getNumArgs()) { 7348 ArgNum++; 7349 break; 7350 } 7351 7352 Expr *Arg = TheCall->getArg(ArgNum); 7353 QualType ArgType = Arg->getType(); 7354 7355 if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) || 7356 (!ExpectedType->isVoidPointerType() && 7357 ArgType.getCanonicalType() != ExpectedType)) 7358 return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible) 7359 << ArgType << ExpectedType << 1 << 0 << 0; 7360 7361 // If the value of the Mask is not 0, we have a constraint in the size of 7362 // the integer argument so here we ensure the argument is a constant that 7363 // is in the valid range. 7364 if (Mask != 0 && 7365 SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true)) 7366 return true; 7367 7368 ArgNum++; 7369 } 7370 7371 // In case we exited early from the previous loop, there are other types to 7372 // read from TypeStr. So we need to read them all to ensure we have the right 7373 // number of arguments in TheCall and if it is not the case, to display a 7374 // better error message. 7375 while (*TypeStr != '\0') { 7376 (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); 7377 ArgNum++; 7378 } 7379 if (checkArgCount(*this, TheCall, ArgNum)) 7380 return true; 7381 7382 return false; 7383 } 7384 7385 /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). 
7386 /// This checks that the target supports __builtin_longjmp and 7387 /// that val is a constant 1. 7388 bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { 7389 if (!Context.getTargetInfo().hasSjLjLowering()) 7390 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported) 7391 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7392 7393 Expr *Arg = TheCall->getArg(1); 7394 llvm::APSInt Result; 7395 7396 // TODO: This is less than ideal. Overload this to take a value. 7397 if (SemaBuiltinConstantArg(TheCall, 1, Result)) 7398 return true; 7399 7400 if (Result != 1) 7401 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val) 7402 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc()); 7403 7404 return false; 7405 } 7406 7407 /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). 7408 /// This checks that the target supports __builtin_setjmp. 7409 bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { 7410 if (!Context.getTargetInfo().hasSjLjLowering()) 7411 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported) 7412 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7413 return false; 7414 } 7415 7416 namespace { 7417 7418 class UncoveredArgHandler { 7419 enum { Unknown = -1, AllCovered = -2 }; 7420 7421 signed FirstUncoveredArg = Unknown; 7422 SmallVector<const Expr *, 4> DiagnosticExprs; 7423 7424 public: 7425 UncoveredArgHandler() = default; 7426 7427 bool hasUncoveredArg() const { 7428 return (FirstUncoveredArg >= 0); 7429 } 7430 7431 unsigned getUncoveredArg() const { 7432 assert(hasUncoveredArg() && "no uncovered argument"); 7433 return FirstUncoveredArg; 7434 } 7435 7436 void setAllCovered() { 7437 // A string has been found with all arguments covered, so clear out 7438 // the diagnostics. 7439 DiagnosticExprs.clear(); 7440 FirstUncoveredArg = AllCovered; 7441 } 7442 7443 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) { 7444 assert(NewFirstUncoveredArg >= 0 && "Outside range"); 7445 7446 // Don't update if a previous string covers all arguments. 7447 if (FirstUncoveredArg == AllCovered) 7448 return; 7449 7450 // UncoveredArgHandler tracks the highest uncovered argument index 7451 // and with it all the strings that match this index. 7452 if (NewFirstUncoveredArg == FirstUncoveredArg) 7453 DiagnosticExprs.push_back(StrExpr); 7454 else if (NewFirstUncoveredArg > FirstUncoveredArg) { 7455 DiagnosticExprs.clear(); 7456 DiagnosticExprs.push_back(StrExpr); 7457 FirstUncoveredArg = NewFirstUncoveredArg; 7458 } 7459 } 7460 7461 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr); 7462 }; 7463 7464 enum StringLiteralCheckType { 7465 SLCT_NotALiteral, 7466 SLCT_UncheckedLiteral, 7467 SLCT_CheckedLiteral 7468 }; 7469 7470 } // namespace 7471 7472 static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, 7473 BinaryOperatorKind BinOpKind, 7474 bool AddendIsRight) { 7475 unsigned BitWidth = Offset.getBitWidth(); 7476 unsigned AddendBitWidth = Addend.getBitWidth(); 7477 // There might be negative interim results. 7478 if (Addend.isUnsigned()) { 7479 Addend = Addend.zext(++AddendBitWidth); 7480 Addend.setIsSigned(true); 7481 } 7482 // Adjust the bit width of the APSInts. 
7483 if (AddendBitWidth > BitWidth) { 7484 Offset = Offset.sext(AddendBitWidth); 7485 BitWidth = AddendBitWidth; 7486 } else if (BitWidth > AddendBitWidth) { 7487 Addend = Addend.sext(BitWidth); 7488 } 7489 7490 bool Ov = false; 7491 llvm::APSInt ResOffset = Offset; 7492 if (BinOpKind == BO_Add) 7493 ResOffset = Offset.sadd_ov(Addend, Ov); 7494 else { 7495 assert(AddendIsRight && BinOpKind == BO_Sub && 7496 "operator must be add or sub with addend on the right"); 7497 ResOffset = Offset.ssub_ov(Addend, Ov); 7498 } 7499 7500 // We add an offset to a pointer here so we should support an offset as big as 7501 // possible. 7502 if (Ov) { 7503 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 && 7504 "index (intermediate) result too big"); 7505 Offset = Offset.sext(2 * BitWidth); 7506 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight); 7507 return; 7508 } 7509 7510 Offset = ResOffset; 7511 } 7512 7513 namespace { 7514 7515 // This is a wrapper class around StringLiteral to support offsetted string 7516 // literals as format strings. It takes the offset into account when returning 7517 // the string and its length or the source locations to display notes correctly. 7518 class FormatStringLiteral { 7519 const StringLiteral *FExpr; 7520 int64_t Offset; 7521 7522 public: 7523 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) 7524 : FExpr(fexpr), Offset(Offset) {} 7525 7526 StringRef getString() const { 7527 return FExpr->getString().drop_front(Offset); 7528 } 7529 7530 unsigned getByteLength() const { 7531 return FExpr->getByteLength() - getCharByteWidth() * Offset; 7532 } 7533 7534 unsigned getLength() const { return FExpr->getLength() - Offset; } 7535 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); } 7536 7537 StringLiteral::StringKind getKind() const { return FExpr->getKind(); } 7538 7539 QualType getType() const { return FExpr->getType(); } 7540 7541 bool isAscii() const { return FExpr->isAscii(); } 7542 bool isWide() const { return FExpr->isWide(); } 7543 bool isUTF8() const { return FExpr->isUTF8(); } 7544 bool isUTF16() const { return FExpr->isUTF16(); } 7545 bool isUTF32() const { return FExpr->isUTF32(); } 7546 bool isPascal() const { return FExpr->isPascal(); } 7547 7548 SourceLocation getLocationOfByte( 7549 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, 7550 const TargetInfo &Target, unsigned *StartToken = nullptr, 7551 unsigned *StartTokenByteOffset = nullptr) const { 7552 return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, 7553 StartToken, StartTokenByteOffset); 7554 } 7555 7556 SourceLocation getBeginLoc() const LLVM_READONLY { 7557 return FExpr->getBeginLoc().getLocWithOffset(Offset); 7558 } 7559 7560 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); } 7561 }; 7562 7563 } // namespace 7564 7565 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 7566 const Expr *OrigFormatExpr, 7567 ArrayRef<const Expr *> Args, 7568 bool HasVAListArg, unsigned format_idx, 7569 unsigned firstDataArg, 7570 Sema::FormatStringType Type, 7571 bool inFunctionCall, 7572 Sema::VariadicCallType CallType, 7573 llvm::SmallBitVector &CheckedVarArgs, 7574 UncoveredArgHandler &UncoveredArg, 7575 bool IgnoreStringsWithoutSpecifiers); 7576 7577 // Determine if an expression is a string literal or constant string. 7578 // If this function returns false on the arguments to a function expecting a 7579 // format string, we will usually need to emit a warning. 
7580 // True string literals are then checked by CheckFormatString. 7581 static StringLiteralCheckType 7582 checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, 7583 bool HasVAListArg, unsigned format_idx, 7584 unsigned firstDataArg, Sema::FormatStringType Type, 7585 Sema::VariadicCallType CallType, bool InFunctionCall, 7586 llvm::SmallBitVector &CheckedVarArgs, 7587 UncoveredArgHandler &UncoveredArg, 7588 llvm::APSInt Offset, 7589 bool IgnoreStringsWithoutSpecifiers = false) { 7590 if (S.isConstantEvaluated()) 7591 return SLCT_NotALiteral; 7592 tryAgain: 7593 assert(Offset.isSigned() && "invalid offset"); 7594 7595 if (E->isTypeDependent() || E->isValueDependent()) 7596 return SLCT_NotALiteral; 7597 7598 E = E->IgnoreParenCasts(); 7599 7600 if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) 7601 // Technically -Wformat-nonliteral does not warn about this case. 7602 // The behavior of printf and friends in this case is implementation 7603 // dependent. Ideally if the format string cannot be null then 7604 // it should have a 'nonnull' attribute in the function prototype. 7605 return SLCT_UncheckedLiteral; 7606 7607 switch (E->getStmtClass()) { 7608 case Stmt::BinaryConditionalOperatorClass: 7609 case Stmt::ConditionalOperatorClass: { 7610 // The expression is a literal if both sub-expressions were, and it was 7611 // completely checked only if both sub-expressions were checked. 7612 const AbstractConditionalOperator *C = 7613 cast<AbstractConditionalOperator>(E); 7614 7615 // Determine whether it is necessary to check both sub-expressions, for 7616 // example, because the condition expression is a constant that can be 7617 // evaluated at compile time. 7618 bool CheckLeft = true, CheckRight = true; 7619 7620 bool Cond; 7621 if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(), 7622 S.isConstantEvaluated())) { 7623 if (Cond) 7624 CheckRight = false; 7625 else 7626 CheckLeft = false; 7627 } 7628 7629 // We need to maintain the offsets for the right and the left hand side 7630 // separately to check if every possible indexed expression is a valid 7631 // string literal. They might have different offsets for different string 7632 // literals in the end. 7633 StringLiteralCheckType Left; 7634 if (!CheckLeft) 7635 Left = SLCT_UncheckedLiteral; 7636 else { 7637 Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, 7638 HasVAListArg, format_idx, firstDataArg, 7639 Type, CallType, InFunctionCall, 7640 CheckedVarArgs, UncoveredArg, Offset, 7641 IgnoreStringsWithoutSpecifiers); 7642 if (Left == SLCT_NotALiteral || !CheckRight) { 7643 return Left; 7644 } 7645 } 7646 7647 StringLiteralCheckType Right = checkFormatStringExpr( 7648 S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, 7649 Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7650 IgnoreStringsWithoutSpecifiers); 7651 7652 return (CheckLeft && Left < Right) ? Left : Right; 7653 } 7654 7655 case Stmt::ImplicitCastExprClass: 7656 E = cast<ImplicitCastExpr>(E)->getSubExpr(); 7657 goto tryAgain; 7658 7659 case Stmt::OpaqueValueExprClass: 7660 if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { 7661 E = src; 7662 goto tryAgain; 7663 } 7664 return SLCT_NotALiteral; 7665 7666 case Stmt::PredefinedExprClass: 7667 // While __func__, etc., are technically not string literals, they 7668 // cannot contain format specifiers and thus are not a security 7669 // liability. 
7670 return SLCT_UncheckedLiteral; 7671 7672 case Stmt::DeclRefExprClass: { 7673 const DeclRefExpr *DR = cast<DeclRefExpr>(E); 7674 7675 // As an exception, do not flag errors for variables binding to 7676 // const string literals. 7677 if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { 7678 bool isConstant = false; 7679 QualType T = DR->getType(); 7680 7681 if (const ArrayType *AT = S.Context.getAsArrayType(T)) { 7682 isConstant = AT->getElementType().isConstant(S.Context); 7683 } else if (const PointerType *PT = T->getAs<PointerType>()) { 7684 isConstant = T.isConstant(S.Context) && 7685 PT->getPointeeType().isConstant(S.Context); 7686 } else if (T->isObjCObjectPointerType()) { 7687 // In ObjC, there is usually no "const ObjectPointer" type, 7688 // so don't check if the pointee type is constant. 7689 isConstant = T.isConstant(S.Context); 7690 } 7691 7692 if (isConstant) { 7693 if (const Expr *Init = VD->getAnyInitializer()) { 7694 // Look through initializers like const char c[] = { "foo" } 7695 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { 7696 if (InitList->isStringLiteralInit()) 7697 Init = InitList->getInit(0)->IgnoreParenImpCasts(); 7698 } 7699 return checkFormatStringExpr(S, Init, Args, 7700 HasVAListArg, format_idx, 7701 firstDataArg, Type, CallType, 7702 /*InFunctionCall*/ false, CheckedVarArgs, 7703 UncoveredArg, Offset); 7704 } 7705 } 7706 7707 // For vprintf* functions (i.e., HasVAListArg==true), we add a 7708 // special check to see if the format string is a function parameter 7709 // of the function calling the printf function. If the function 7710 // has an attribute indicating it is a printf-like function, then we 7711 // should suppress warnings concerning non-literals being used in a call 7712 // to a vprintf function. For example: 7713 // 7714 // void 7715 // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ 7716 // va_list ap; 7717 // va_start(ap, fmt); 7718 // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". 7719 // ... 7720 // } 7721 if (HasVAListArg) { 7722 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { 7723 if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { 7724 int PVIndex = PV->getFunctionScopeIndex() + 1; 7725 for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { 7726 // adjust for implicit parameter 7727 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) 7728 if (MD->isInstance()) 7729 ++PVIndex; 7730 // We also check if the formats are compatible. 7731 // We can't pass a 'scanf' string to a 'printf' function. 
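// For example (hypothetical): a parameter declared with
// __attribute__((format(scanf, 1, 2))) does not suppress the warning when it
// is forwarded to vprintf, because the format kinds do not match.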
7732 if (PVIndex == PVFormat->getFormatIdx() && 7733 Type == S.GetFormatStringType(PVFormat)) 7734 return SLCT_UncheckedLiteral; 7735 } 7736 } 7737 } 7738 } 7739 } 7740 7741 return SLCT_NotALiteral; 7742 } 7743 7744 case Stmt::CallExprClass: 7745 case Stmt::CXXMemberCallExprClass: { 7746 const CallExpr *CE = cast<CallExpr>(E); 7747 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { 7748 bool IsFirst = true; 7749 StringLiteralCheckType CommonResult; 7750 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) { 7751 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex()); 7752 StringLiteralCheckType Result = checkFormatStringExpr( 7753 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7754 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7755 IgnoreStringsWithoutSpecifiers); 7756 if (IsFirst) { 7757 CommonResult = Result; 7758 IsFirst = false; 7759 } 7760 } 7761 if (!IsFirst) 7762 return CommonResult; 7763 7764 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) { 7765 unsigned BuiltinID = FD->getBuiltinID(); 7766 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || 7767 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { 7768 const Expr *Arg = CE->getArg(0); 7769 return checkFormatStringExpr(S, Arg, Args, 7770 HasVAListArg, format_idx, 7771 firstDataArg, Type, CallType, 7772 InFunctionCall, CheckedVarArgs, 7773 UncoveredArg, Offset, 7774 IgnoreStringsWithoutSpecifiers); 7775 } 7776 } 7777 } 7778 7779 return SLCT_NotALiteral; 7780 } 7781 case Stmt::ObjCMessageExprClass: { 7782 const auto *ME = cast<ObjCMessageExpr>(E); 7783 if (const auto *MD = ME->getMethodDecl()) { 7784 if (const auto *FA = MD->getAttr<FormatArgAttr>()) { 7785 // As a special case heuristic, if we're using the method -[NSBundle 7786 // localizedStringForKey:value:table:], ignore any key strings that lack 7787 // format specifiers. The idea is that if the key doesn't have any 7788 // format specifiers then its probably just a key to map to the 7789 // localized strings. If it does have format specifiers though, then its 7790 // likely that the text of the key is the format string in the 7791 // programmer's language, and should be checked. 7792 const ObjCInterfaceDecl *IFace; 7793 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) && 7794 IFace->getIdentifier()->isStr("NSBundle") && 7795 MD->getSelector().isKeywordSelector( 7796 {"localizedStringForKey", "value", "table"})) { 7797 IgnoreStringsWithoutSpecifiers = true; 7798 } 7799 7800 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex()); 7801 return checkFormatStringExpr( 7802 S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, 7803 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset, 7804 IgnoreStringsWithoutSpecifiers); 7805 } 7806 } 7807 7808 return SLCT_NotALiteral; 7809 } 7810 case Stmt::ObjCStringLiteralClass: 7811 case Stmt::StringLiteralClass: { 7812 const StringLiteral *StrE = nullptr; 7813 7814 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) 7815 StrE = ObjCFExpr->getString(); 7816 else 7817 StrE = cast<StringLiteral>(E); 7818 7819 if (StrE) { 7820 if (Offset.isNegative() || Offset > StrE->getLength()) { 7821 // TODO: It would be better to have an explicit warning for out of 7822 // bounds literals. 
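// For example (hypothetical call): printf("%d%d" + 10, x) computes an offset
// past the end of the literal, so for now the string is simply treated as a
// non-literal instead of getting a dedicated out-of-bounds warning.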
7823 return SLCT_NotALiteral; 7824 } 7825 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue()); 7826 CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx, 7827 firstDataArg, Type, InFunctionCall, CallType, 7828 CheckedVarArgs, UncoveredArg, 7829 IgnoreStringsWithoutSpecifiers); 7830 return SLCT_CheckedLiteral; 7831 } 7832 7833 return SLCT_NotALiteral; 7834 } 7835 case Stmt::BinaryOperatorClass: { 7836 const BinaryOperator *BinOp = cast<BinaryOperator>(E); 7837 7838 // A string literal + an int offset is still a string literal. 7839 if (BinOp->isAdditiveOp()) { 7840 Expr::EvalResult LResult, RResult; 7841 7842 bool LIsInt = BinOp->getLHS()->EvaluateAsInt( 7843 LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7844 bool RIsInt = BinOp->getRHS()->EvaluateAsInt( 7845 RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated()); 7846 7847 if (LIsInt != RIsInt) { 7848 BinaryOperatorKind BinOpKind = BinOp->getOpcode(); 7849 7850 if (LIsInt) { 7851 if (BinOpKind == BO_Add) { 7852 sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt); 7853 E = BinOp->getRHS(); 7854 goto tryAgain; 7855 } 7856 } else { 7857 sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt); 7858 E = BinOp->getLHS(); 7859 goto tryAgain; 7860 } 7861 } 7862 } 7863 7864 return SLCT_NotALiteral; 7865 } 7866 case Stmt::UnaryOperatorClass: { 7867 const UnaryOperator *UnaOp = cast<UnaryOperator>(E); 7868 auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr()); 7869 if (UnaOp->getOpcode() == UO_AddrOf && ASE) { 7870 Expr::EvalResult IndexResult; 7871 if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context, 7872 Expr::SE_NoSideEffects, 7873 S.isConstantEvaluated())) { 7874 sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add, 7875 /*RHS is int*/ true); 7876 E = ASE->getBase(); 7877 goto tryAgain; 7878 } 7879 } 7880 7881 return SLCT_NotALiteral; 7882 } 7883 7884 default: 7885 return SLCT_NotALiteral; 7886 } 7887 } 7888 7889 Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { 7890 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) 7891 .Case("scanf", FST_Scanf) 7892 .Cases("printf", "printf0", FST_Printf) 7893 .Cases("NSString", "CFString", FST_NSString) 7894 .Case("strftime", FST_Strftime) 7895 .Case("strfmon", FST_Strfmon) 7896 .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) 7897 .Case("freebsd_kprintf", FST_FreeBSDKPrintf) 7898 .Case("os_trace", FST_OSLog) 7899 .Case("os_log", FST_OSLog) 7900 .Default(FST_Unknown); 7901 } 7902 7903 /// CheckFormatArguments - Check calls to printf and scanf (and similar 7904 /// functions) for correct use of format strings. 7905 /// Returns true if a format string has been fully checked. 
7906 bool Sema::CheckFormatArguments(const FormatAttr *Format, 7907 ArrayRef<const Expr *> Args, 7908 bool IsCXXMember, 7909 VariadicCallType CallType, 7910 SourceLocation Loc, SourceRange Range, 7911 llvm::SmallBitVector &CheckedVarArgs) { 7912 FormatStringInfo FSI; 7913 if (getFormatStringInfo(Format, IsCXXMember, &FSI)) 7914 return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, 7915 FSI.FirstDataArg, GetFormatStringType(Format), 7916 CallType, Loc, Range, CheckedVarArgs); 7917 return false; 7918 } 7919 7920 bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, 7921 bool HasVAListArg, unsigned format_idx, 7922 unsigned firstDataArg, FormatStringType Type, 7923 VariadicCallType CallType, 7924 SourceLocation Loc, SourceRange Range, 7925 llvm::SmallBitVector &CheckedVarArgs) { 7926 // CHECK: printf/scanf-like function is called with no format string. 7927 if (format_idx >= Args.size()) { 7928 Diag(Loc, diag::warn_missing_format_string) << Range; 7929 return false; 7930 } 7931 7932 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); 7933 7934 // CHECK: format string is not a string literal. 7935 // 7936 // Dynamically generated format strings are difficult to 7937 // automatically vet at compile time. Requiring that format strings 7938 // are string literals: (1) permits the checking of format strings by 7939 // the compiler and thereby (2) can practically remove the source of 7940 // many format string exploits. 7941 7942 // Format string can be either ObjC string (e.g. @"%d") or 7943 // C string (e.g. "%d") 7944 // ObjC string uses the same format specifiers as C string, so we can use 7945 // the same format string checking logic for both ObjC and C strings. 7946 UncoveredArgHandler UncoveredArg; 7947 StringLiteralCheckType CT = 7948 checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, 7949 format_idx, firstDataArg, Type, CallType, 7950 /*IsFunctionCall*/ true, CheckedVarArgs, 7951 UncoveredArg, 7952 /*no string offset*/ llvm::APSInt(64, false) = 0); 7953 7954 // Generate a diagnostic where an uncovered argument is detected. 7955 if (UncoveredArg.hasUncoveredArg()) { 7956 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; 7957 assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); 7958 UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); 7959 } 7960 7961 if (CT != SLCT_NotALiteral) 7962 // Literal format string found, check done! 7963 return CT == SLCT_CheckedLiteral; 7964 7965 // Strftime is particular as it always uses a single 'time' argument, 7966 // so it is safe to pass a non-literal string. 7967 if (Type == FST_Strftime) 7968 return false; 7969 7970 // Do not emit diag when the string param is a macro expansion and the 7971 // format is either NSString or CFString. This is a hack to prevent 7972 // diag when using the NSLocalizedString and CFCopyLocalizedString macros 7973 // which are usually used in place of NS and CF string literals. 7974 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc(); 7975 if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc)) 7976 return false; 7977 7978 // If there are no arguments specified, warn with -Wformat-security, otherwise 7979 // warn only with -Wformat-nonliteral. 
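// For example (hypothetical calls):
//   printf(fmt);     // no data arguments -> -Wformat-security (with a "%s" fix-it)
//   printf(fmt, x);  // data arguments present -> -Wformat-nonliteral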
7980 if (Args.size() == firstDataArg) { 7981 Diag(FormatLoc, diag::warn_format_nonliteral_noargs) 7982 << OrigFormatExpr->getSourceRange(); 7983 switch (Type) { 7984 default: 7985 break; 7986 case FST_Kprintf: 7987 case FST_FreeBSDKPrintf: 7988 case FST_Printf: 7989 Diag(FormatLoc, diag::note_format_security_fixit) 7990 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); 7991 break; 7992 case FST_NSString: 7993 Diag(FormatLoc, diag::note_format_security_fixit) 7994 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); 7995 break; 7996 } 7997 } else { 7998 Diag(FormatLoc, diag::warn_format_nonliteral) 7999 << OrigFormatExpr->getSourceRange(); 8000 } 8001 return false; 8002 } 8003 8004 namespace { 8005 8006 class CheckFormatHandler : public analyze_format_string::FormatStringHandler { 8007 protected: 8008 Sema &S; 8009 const FormatStringLiteral *FExpr; 8010 const Expr *OrigFormatExpr; 8011 const Sema::FormatStringType FSType; 8012 const unsigned FirstDataArg; 8013 const unsigned NumDataArgs; 8014 const char *Beg; // Start of format string. 8015 const bool HasVAListArg; 8016 ArrayRef<const Expr *> Args; 8017 unsigned FormatIdx; 8018 llvm::SmallBitVector CoveredArgs; 8019 bool usesPositionalArgs = false; 8020 bool atFirstArg = true; 8021 bool inFunctionCall; 8022 Sema::VariadicCallType CallType; 8023 llvm::SmallBitVector &CheckedVarArgs; 8024 UncoveredArgHandler &UncoveredArg; 8025 8026 public: 8027 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 8028 const Expr *origFormatExpr, 8029 const Sema::FormatStringType type, unsigned firstDataArg, 8030 unsigned numDataArgs, const char *beg, bool hasVAListArg, 8031 ArrayRef<const Expr *> Args, unsigned formatIdx, 8032 bool inFunctionCall, Sema::VariadicCallType callType, 8033 llvm::SmallBitVector &CheckedVarArgs, 8034 UncoveredArgHandler &UncoveredArg) 8035 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 8036 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 8037 HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), 8038 inFunctionCall(inFunctionCall), CallType(callType), 8039 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 8040 CoveredArgs.resize(numDataArgs); 8041 CoveredArgs.reset(); 8042 } 8043 8044 void DoneProcessing(); 8045 8046 void HandleIncompleteSpecifier(const char *startSpecifier, 8047 unsigned specifierLen) override; 8048 8049 void HandleInvalidLengthModifier( 8050 const analyze_format_string::FormatSpecifier &FS, 8051 const analyze_format_string::ConversionSpecifier &CS, 8052 const char *startSpecifier, unsigned specifierLen, 8053 unsigned DiagID); 8054 8055 void HandleNonStandardLengthModifier( 8056 const analyze_format_string::FormatSpecifier &FS, 8057 const char *startSpecifier, unsigned specifierLen); 8058 8059 void HandleNonStandardConversionSpecifier( 8060 const analyze_format_string::ConversionSpecifier &CS, 8061 const char *startSpecifier, unsigned specifierLen); 8062 8063 void HandlePosition(const char *startPos, unsigned posLen) override; 8064 8065 void HandleInvalidPosition(const char *startSpecifier, 8066 unsigned specifierLen, 8067 analyze_format_string::PositionContext p) override; 8068 8069 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 8070 8071 void HandleNullChar(const char *nullCharacter) override; 8072 8073 template <typename Range> 8074 static void 8075 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 8076 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 8077 bool IsStringLocation, 
Range StringRange, 8078 ArrayRef<FixItHint> Fixit = None); 8079 8080 protected: 8081 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 8082 const char *startSpec, 8083 unsigned specifierLen, 8084 const char *csStart, unsigned csLen); 8085 8086 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 8087 const char *startSpec, 8088 unsigned specifierLen); 8089 8090 SourceRange getFormatStringRange(); 8091 CharSourceRange getSpecifierRange(const char *startSpecifier, 8092 unsigned specifierLen); 8093 SourceLocation getLocationOfByte(const char *x); 8094 8095 const Expr *getDataArg(unsigned i) const; 8096 8097 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 8098 const analyze_format_string::ConversionSpecifier &CS, 8099 const char *startSpecifier, unsigned specifierLen, 8100 unsigned argIndex); 8101 8102 template <typename Range> 8103 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 8104 bool IsStringLocation, Range StringRange, 8105 ArrayRef<FixItHint> Fixit = None); 8106 }; 8107 8108 } // namespace 8109 8110 SourceRange CheckFormatHandler::getFormatStringRange() { 8111 return OrigFormatExpr->getSourceRange(); 8112 } 8113 8114 CharSourceRange CheckFormatHandler:: 8115 getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 8116 SourceLocation Start = getLocationOfByte(startSpecifier); 8117 SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); 8118 8119 // Advance the end SourceLocation by one due to half-open ranges. 8120 End = End.getLocWithOffset(1); 8121 8122 return CharSourceRange::getCharRange(Start, End); 8123 } 8124 8125 SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { 8126 return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(), 8127 S.getLangOpts(), S.Context.getTargetInfo()); 8128 } 8129 8130 void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, 8131 unsigned specifierLen){ 8132 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), 8133 getLocationOfByte(startSpecifier), 8134 /*IsStringLocation*/true, 8135 getSpecifierRange(startSpecifier, specifierLen)); 8136 } 8137 8138 void CheckFormatHandler::HandleInvalidLengthModifier( 8139 const analyze_format_string::FormatSpecifier &FS, 8140 const analyze_format_string::ConversionSpecifier &CS, 8141 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { 8142 using namespace analyze_format_string; 8143 8144 const LengthModifier &LM = FS.getLengthModifier(); 8145 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8146 8147 // See if we know how to fix this length modifier. 
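// For instance, a nonsensical length modifier such as the 'L' in "%Ld" can
// typically be corrected to "%lld" by the fix-it emitted below.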
8148 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8149 if (FixedLM) { 8150 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8151 getLocationOfByte(LM.getStart()), 8152 /*IsStringLocation*/true, 8153 getSpecifierRange(startSpecifier, specifierLen)); 8154 8155 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8156 << FixedLM->toString() 8157 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8158 8159 } else { 8160 FixItHint Hint; 8161 if (DiagID == diag::warn_format_nonsensical_length) 8162 Hint = FixItHint::CreateRemoval(LMRange); 8163 8164 EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), 8165 getLocationOfByte(LM.getStart()), 8166 /*IsStringLocation*/true, 8167 getSpecifierRange(startSpecifier, specifierLen), 8168 Hint); 8169 } 8170 } 8171 8172 void CheckFormatHandler::HandleNonStandardLengthModifier( 8173 const analyze_format_string::FormatSpecifier &FS, 8174 const char *startSpecifier, unsigned specifierLen) { 8175 using namespace analyze_format_string; 8176 8177 const LengthModifier &LM = FS.getLengthModifier(); 8178 CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); 8179 8180 // See if we know how to fix this length modifier. 8181 Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); 8182 if (FixedLM) { 8183 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8184 << LM.toString() << 0, 8185 getLocationOfByte(LM.getStart()), 8186 /*IsStringLocation*/true, 8187 getSpecifierRange(startSpecifier, specifierLen)); 8188 8189 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) 8190 << FixedLM->toString() 8191 << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); 8192 8193 } else { 8194 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8195 << LM.toString() << 0, 8196 getLocationOfByte(LM.getStart()), 8197 /*IsStringLocation*/true, 8198 getSpecifierRange(startSpecifier, specifierLen)); 8199 } 8200 } 8201 8202 void CheckFormatHandler::HandleNonStandardConversionSpecifier( 8203 const analyze_format_string::ConversionSpecifier &CS, 8204 const char *startSpecifier, unsigned specifierLen) { 8205 using namespace analyze_format_string; 8206 8207 // See if we know how to fix this conversion specifier. 
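// For instance, a legacy specifier such as "%D" may be offered the standard
// "%d" as a replacement by the fix-it emitted below.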
8208 Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier(); 8209 if (FixedCS) { 8210 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8211 << CS.toString() << /*conversion specifier*/1, 8212 getLocationOfByte(CS.getStart()), 8213 /*IsStringLocation*/true, 8214 getSpecifierRange(startSpecifier, specifierLen)); 8215 8216 CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); 8217 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) 8218 << FixedCS->toString() 8219 << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); 8220 } else { 8221 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) 8222 << CS.toString() << /*conversion specifier*/1, 8223 getLocationOfByte(CS.getStart()), 8224 /*IsStringLocation*/true, 8225 getSpecifierRange(startSpecifier, specifierLen)); 8226 } 8227 } 8228 8229 void CheckFormatHandler::HandlePosition(const char *startPos, 8230 unsigned posLen) { 8231 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), 8232 getLocationOfByte(startPos), 8233 /*IsStringLocation*/true, 8234 getSpecifierRange(startPos, posLen)); 8235 } 8236 8237 void 8238 CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, 8239 analyze_format_string::PositionContext p) { 8240 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) 8241 << (unsigned) p, 8242 getLocationOfByte(startPos), /*IsStringLocation*/true, 8243 getSpecifierRange(startPos, posLen)); 8244 } 8245 8246 void CheckFormatHandler::HandleZeroPosition(const char *startPos, 8247 unsigned posLen) { 8248 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), 8249 getLocationOfByte(startPos), 8250 /*IsStringLocation*/true, 8251 getSpecifierRange(startPos, posLen)); 8252 } 8253 8254 void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { 8255 if (!isa<ObjCStringLiteral>(OrigFormatExpr)) { 8256 // The presence of a null character is likely an error. 8257 EmitFormatDiagnostic( 8258 S.PDiag(diag::warn_printf_format_string_contains_null_char), 8259 getLocationOfByte(nullCharacter), /*IsStringLocation*/true, 8260 getFormatStringRange()); 8261 } 8262 } 8263 8264 // Note that this may return NULL if there was an error parsing or building 8265 // one of the argument expressions. 8266 const Expr *CheckFormatHandler::getDataArg(unsigned i) const { 8267 return Args[FirstDataArg + i]; 8268 } 8269 8270 void CheckFormatHandler::DoneProcessing() { 8271 // Does the number of data arguments exceed the number of 8272 // format conversions in the format string? 8273 if (!HasVAListArg) { 8274 // Find any arguments that weren't covered. 
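// For example (hypothetical call): printf("%d", x, y) leaves 'y' uncovered,
// which is recorded here and later reported via warn_printf_data_arg_not_used.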
8275     CoveredArgs.flip();
8276     signed notCoveredArg = CoveredArgs.find_first();
8277     if (notCoveredArg >= 0) {
8278       assert((unsigned)notCoveredArg < NumDataArgs);
8279       UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
8280     } else {
8281       UncoveredArg.setAllCovered();
8282     }
8283   }
8284 }
8285 
8286 void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
8287                                    const Expr *ArgExpr) {
8288   assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
8289          "Invalid state");
8290 
8291   if (!ArgExpr)
8292     return;
8293 
8294   SourceLocation Loc = ArgExpr->getBeginLoc();
8295 
8296   if (S.getSourceManager().isInSystemMacro(Loc))
8297     return;
8298 
8299   PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
8300   for (auto E : DiagnosticExprs)
8301     PDiag << E->getSourceRange();
8302 
8303   CheckFormatHandler::EmitFormatDiagnostic(
8304       S, IsFunctionCall, DiagnosticExprs[0],
8305       PDiag, Loc, /*IsStringLocation*/false,
8306       DiagnosticExprs[0]->getSourceRange());
8307 }
8308 
8309 bool
8310 CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
8311                                                      SourceLocation Loc,
8312                                                      const char *startSpec,
8313                                                      unsigned specifierLen,
8314                                                      const char *csStart,
8315                                                      unsigned csLen) {
8316   bool keepGoing = true;
8317   if (argIndex < NumDataArgs) {
8318     // Consider the argument covered, even though the specifier doesn't
8319     // make sense.
8320     CoveredArgs.set(argIndex);
8321   }
8322   else {
8323     // If argIndex exceeds the number of data arguments we
8324     // don't issue a warning because that is just a cascade of warnings (and
8325     // they may have intended '%%' anyway). We don't want to continue processing
8326     // the format string after this point, however, as we will likely just get
8327     // gibberish when trying to match arguments.
8328     keepGoing = false;
8329   }
8330 
8331   StringRef Specifier(csStart, csLen);
8332 
8333   // If the specifier is non-printable, it could be the first byte of a UTF-8
8334   // sequence. In that case, print the UTF-8 code point. If not, print the
8335   // byte's hex value.
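// For example (hypothetical): a control character following '%' would be
// reported as "\x01" in the diagnostic below rather than printed raw.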
8336 std::string CodePointStr; 8337 if (!llvm::sys::locale::isPrint(*csStart)) { 8338 llvm::UTF32 CodePoint; 8339 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart); 8340 const llvm::UTF8 *E = 8341 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen); 8342 llvm::ConversionResult Result = 8343 llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); 8344 8345 if (Result != llvm::conversionOK) { 8346 unsigned char FirstChar = *csStart; 8347 CodePoint = (llvm::UTF32)FirstChar; 8348 } 8349 8350 llvm::raw_string_ostream OS(CodePointStr); 8351 if (CodePoint < 256) 8352 OS << "\\x" << llvm::format("%02x", CodePoint); 8353 else if (CodePoint <= 0xFFFF) 8354 OS << "\\u" << llvm::format("%04x", CodePoint); 8355 else 8356 OS << "\\U" << llvm::format("%08x", CodePoint); 8357 OS.flush(); 8358 Specifier = CodePointStr; 8359 } 8360 8361 EmitFormatDiagnostic( 8362 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc, 8363 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen)); 8364 8365 return keepGoing; 8366 } 8367 8368 void 8369 CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, 8370 const char *startSpec, 8371 unsigned specifierLen) { 8372 EmitFormatDiagnostic( 8373 S.PDiag(diag::warn_format_mix_positional_nonpositional_args), 8374 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); 8375 } 8376 8377 bool 8378 CheckFormatHandler::CheckNumArgs( 8379 const analyze_format_string::FormatSpecifier &FS, 8380 const analyze_format_string::ConversionSpecifier &CS, 8381 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { 8382 8383 if (argIndex >= NumDataArgs) { 8384 PartialDiagnostic PDiag = FS.usesPositionalArg() 8385 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) 8386 << (argIndex+1) << NumDataArgs) 8387 : S.PDiag(diag::warn_printf_insufficient_data_args); 8388 EmitFormatDiagnostic( 8389 PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, 8390 getSpecifierRange(startSpecifier, specifierLen)); 8391 8392 // Since more arguments than conversion tokens are given, by extension 8393 // all arguments are covered, so mark this as so. 8394 UncoveredArg.setAllCovered(); 8395 return false; 8396 } 8397 return true; 8398 } 8399 8400 template<typename Range> 8401 void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, 8402 SourceLocation Loc, 8403 bool IsStringLocation, 8404 Range StringRange, 8405 ArrayRef<FixItHint> FixIt) { 8406 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, 8407 Loc, IsStringLocation, StringRange, FixIt); 8408 } 8409 8410 /// If the format string is not within the function call, emit a note 8411 /// so that the function call and string are in diagnostic messages. 8412 /// 8413 /// \param InFunctionCall if true, the format string is within the function 8414 /// call and only one diagnostic message will be produced. Otherwise, an 8415 /// extra note will be emitted pointing to location of the format string. 8416 /// 8417 /// \param ArgumentExpr the expression that is passed as the format string 8418 /// argument in the function call. Used for getting locations when two 8419 /// diagnostics are emitted. 8420 /// 8421 /// \param PDiag the callee should already have provided any strings for the 8422 /// diagnostic message. This function only adds locations and fixits 8423 /// to diagnostics. 8424 /// 8425 /// \param Loc primary location for diagnostic. 
If two diagnostics are
8426 /// required, one will be at Loc and a new SourceLocation will be created for
8427 /// the other one.
8428 ///
8429 /// \param IsStringLocation if true, Loc points into the format string and will
8430 /// be used for the note. Otherwise, Loc points to the argument list and will
8431 /// be used with PDiag.
8432 ///
8433 /// \param StringRange some or all of the string to highlight. This is
8434 /// templated so it can accept either a CharSourceRange or a SourceRange.
8435 ///
8436 /// \param FixIt optional fix-it hint for the format string.
8437 template <typename Range>
8438 void CheckFormatHandler::EmitFormatDiagnostic(
8439     Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
8440     const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
8441     Range StringRange, ArrayRef<FixItHint> FixIt) {
8442   if (InFunctionCall) {
8443     const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
8444     D << StringRange;
8445     D << FixIt;
8446   } else {
8447     S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
8448       << ArgumentExpr->getSourceRange();
8449 
8450     const Sema::SemaDiagnosticBuilder &Note =
8451       S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
8452              diag::note_format_string_defined);
8453 
8454     Note << StringRange;
8455     Note << FixIt;
8456   }
8457 }
8458 
8459 //===--- CHECK: Printf format string checking ------------------------------===//
8460 
8461 namespace {
8462 
8463 class CheckPrintfHandler : public CheckFormatHandler {
8464 public:
8465   CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
8466                      const Expr *origFormatExpr,
8467                      const Sema::FormatStringType type, unsigned firstDataArg,
8468                      unsigned numDataArgs, bool isObjC, const char *beg,
8469                      bool hasVAListArg, ArrayRef<const Expr *> Args,
8470                      unsigned formatIdx, bool inFunctionCall,
8471                      Sema::VariadicCallType CallType,
8472                      llvm::SmallBitVector &CheckedVarArgs,
8473                      UncoveredArgHandler &UncoveredArg)
8474       : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
8475                            numDataArgs, beg, hasVAListArg, Args, formatIdx,
8476                            inFunctionCall, CallType, CheckedVarArgs,
8477                            UncoveredArg) {}
8478 
8479   bool isObjCContext() const { return FSType == Sema::FST_NSString; }
8480 
8481   /// Returns true if '%@' specifiers are allowed in the format string.
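  /// For example, an NSLog(@"%@", obj) format may use '%@', while a plain C
  /// printf format string may not.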
8482 bool allowsObjCArg() const { 8483 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 8484 FSType == Sema::FST_OSTrace; 8485 } 8486 8487 bool HandleInvalidPrintfConversionSpecifier( 8488 const analyze_printf::PrintfSpecifier &FS, 8489 const char *startSpecifier, 8490 unsigned specifierLen) override; 8491 8492 void handleInvalidMaskType(StringRef MaskType) override; 8493 8494 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 8495 const char *startSpecifier, 8496 unsigned specifierLen) override; 8497 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 8498 const char *StartSpecifier, 8499 unsigned SpecifierLen, 8500 const Expr *E); 8501 8502 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 8503 const char *startSpecifier, unsigned specifierLen); 8504 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 8505 const analyze_printf::OptionalAmount &Amt, 8506 unsigned type, 8507 const char *startSpecifier, unsigned specifierLen); 8508 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8509 const analyze_printf::OptionalFlag &flag, 8510 const char *startSpecifier, unsigned specifierLen); 8511 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 8512 const analyze_printf::OptionalFlag &ignoredFlag, 8513 const analyze_printf::OptionalFlag &flag, 8514 const char *startSpecifier, unsigned specifierLen); 8515 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 8516 const Expr *E); 8517 8518 void HandleEmptyObjCModifierFlag(const char *startFlag, 8519 unsigned flagLen) override; 8520 8521 void HandleInvalidObjCModifierFlag(const char *startFlag, 8522 unsigned flagLen) override; 8523 8524 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 8525 const char *flagsEnd, 8526 const char *conversionPosition) 8527 override; 8528 }; 8529 8530 } // namespace 8531 8532 bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( 8533 const analyze_printf::PrintfSpecifier &FS, 8534 const char *startSpecifier, 8535 unsigned specifierLen) { 8536 const analyze_printf::PrintfConversionSpecifier &CS = 8537 FS.getConversionSpecifier(); 8538 8539 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 8540 getLocationOfByte(CS.getStart()), 8541 startSpecifier, specifierLen, 8542 CS.getStart(), CS.getLength()); 8543 } 8544 8545 void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { 8546 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size); 8547 } 8548 8549 bool CheckPrintfHandler::HandleAmount( 8550 const analyze_format_string::OptionalAmount &Amt, 8551 unsigned k, const char *startSpecifier, 8552 unsigned specifierLen) { 8553 if (Amt.hasDataArgument()) { 8554 if (!HasVAListArg) { 8555 unsigned argIndex = Amt.getArgIndex(); 8556 if (argIndex >= NumDataArgs) { 8557 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) 8558 << k, 8559 getLocationOfByte(Amt.getStart()), 8560 /*IsStringLocation*/true, 8561 getSpecifierRange(startSpecifier, specifierLen)); 8562 // Don't do any more checking. We will just emit 8563 // spurious errors. 8564 return false; 8565 } 8566 8567 // Type check the data argument. It should be an 'int'. 8568 // Although not in conformance with C99, we also allow the argument to be 8569 // an 'unsigned int' as that is a reasonably safe case. GCC also 8570 // doesn't emit a warning for that case. 
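// For example (hypothetical call): printf("%*d", width, x) requires 'width'
// to be an int (or, as allowed here, an unsigned int); anything else is
// diagnosed below via warn_printf_asterisk_wrong_type.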
8571 CoveredArgs.set(argIndex); 8572 const Expr *Arg = getDataArg(argIndex); 8573 if (!Arg) 8574 return false; 8575 8576 QualType T = Arg->getType(); 8577 8578 const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); 8579 assert(AT.isValid()); 8580 8581 if (!AT.matchesType(S.Context, T)) { 8582 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) 8583 << k << AT.getRepresentativeTypeName(S.Context) 8584 << T << Arg->getSourceRange(), 8585 getLocationOfByte(Amt.getStart()), 8586 /*IsStringLocation*/true, 8587 getSpecifierRange(startSpecifier, specifierLen)); 8588 // Don't do any more checking. We will just emit 8589 // spurious errors. 8590 return false; 8591 } 8592 } 8593 } 8594 return true; 8595 } 8596 8597 void CheckPrintfHandler::HandleInvalidAmount( 8598 const analyze_printf::PrintfSpecifier &FS, 8599 const analyze_printf::OptionalAmount &Amt, 8600 unsigned type, 8601 const char *startSpecifier, 8602 unsigned specifierLen) { 8603 const analyze_printf::PrintfConversionSpecifier &CS = 8604 FS.getConversionSpecifier(); 8605 8606 FixItHint fixit = 8607 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant 8608 ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), 8609 Amt.getConstantLength())) 8610 : FixItHint(); 8611 8612 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) 8613 << type << CS.toString(), 8614 getLocationOfByte(Amt.getStart()), 8615 /*IsStringLocation*/true, 8616 getSpecifierRange(startSpecifier, specifierLen), 8617 fixit); 8618 } 8619 8620 void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, 8621 const analyze_printf::OptionalFlag &flag, 8622 const char *startSpecifier, 8623 unsigned specifierLen) { 8624 // Warn about pointless flag with a fixit removal. 8625 const analyze_printf::PrintfConversionSpecifier &CS = 8626 FS.getConversionSpecifier(); 8627 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) 8628 << flag.toString() << CS.toString(), 8629 getLocationOfByte(flag.getPosition()), 8630 /*IsStringLocation*/true, 8631 getSpecifierRange(startSpecifier, specifierLen), 8632 FixItHint::CreateRemoval( 8633 getSpecifierRange(flag.getPosition(), 1))); 8634 } 8635 8636 void CheckPrintfHandler::HandleIgnoredFlag( 8637 const analyze_printf::PrintfSpecifier &FS, 8638 const analyze_printf::OptionalFlag &ignoredFlag, 8639 const analyze_printf::OptionalFlag &flag, 8640 const char *startSpecifier, 8641 unsigned specifierLen) { 8642 // Warn about ignored flag with a fixit removal. 8643 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) 8644 << ignoredFlag.toString() << flag.toString(), 8645 getLocationOfByte(ignoredFlag.getPosition()), 8646 /*IsStringLocation*/true, 8647 getSpecifierRange(startSpecifier, specifierLen), 8648 FixItHint::CreateRemoval( 8649 getSpecifierRange(ignoredFlag.getPosition(), 1))); 8650 } 8651 8652 void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, 8653 unsigned flagLen) { 8654 // Warn about an empty flag. 8655 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), 8656 getLocationOfByte(startFlag), 8657 /*IsStringLocation*/true, 8658 getSpecifierRange(startFlag, flagLen)); 8659 } 8660 8661 void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, 8662 unsigned flagLen) { 8663 // Warn about an invalid flag. 
8664 auto Range = getSpecifierRange(startFlag, flagLen); 8665 StringRef flag(startFlag, flagLen); 8666 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, 8667 getLocationOfByte(startFlag), 8668 /*IsStringLocation*/true, 8669 Range, FixItHint::CreateRemoval(Range)); 8670 } 8671 8672 void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( 8673 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { 8674 // Warn about using '[...]' without a '@' conversion. 8675 auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); 8676 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; 8677 EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), 8678 getLocationOfByte(conversionPosition), 8679 /*IsStringLocation*/true, 8680 Range, FixItHint::CreateRemoval(Range)); 8681 } 8682 8683 // Determines if the specified is a C++ class or struct containing 8684 // a member with the specified name and kind (e.g. a CXXMethodDecl named 8685 // "c_str()"). 8686 template<typename MemberKind> 8687 static llvm::SmallPtrSet<MemberKind*, 1> 8688 CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { 8689 const RecordType *RT = Ty->getAs<RecordType>(); 8690 llvm::SmallPtrSet<MemberKind*, 1> Results; 8691 8692 if (!RT) 8693 return Results; 8694 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); 8695 if (!RD || !RD->getDefinition()) 8696 return Results; 8697 8698 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), 8699 Sema::LookupMemberName); 8700 R.suppressDiagnostics(); 8701 8702 // We just need to include all members of the right kind turned up by the 8703 // filter, at this point. 8704 if (S.LookupQualifiedName(R, RT->getDecl())) 8705 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { 8706 NamedDecl *decl = (*I)->getUnderlyingDecl(); 8707 if (MemberKind *FK = dyn_cast<MemberKind>(decl)) 8708 Results.insert(FK); 8709 } 8710 return Results; 8711 } 8712 8713 /// Check if we could call '.c_str()' on an object. 8714 /// 8715 /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't 8716 /// allow the call, or if it would be ambiguous). 8717 bool Sema::hasCStrMethod(const Expr *E) { 8718 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8719 8720 MethodSet Results = 8721 CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); 8722 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8723 MI != ME; ++MI) 8724 if ((*MI)->getMinRequiredArguments() == 0) 8725 return true; 8726 return false; 8727 } 8728 8729 // Check if a (w)string was passed when a (w)char* was needed, and offer a 8730 // better diagnostic if so. AT is assumed to be valid. 8731 // Returns true when a c_str() conversion method is found. 8732 bool CheckPrintfHandler::checkForCStrMembers( 8733 const analyze_printf::ArgType &AT, const Expr *E) { 8734 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>; 8735 8736 MethodSet Results = 8737 CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); 8738 8739 for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); 8740 MI != ME; ++MI) { 8741 const CXXMethodDecl *Method = *MI; 8742 if (Method->getMinRequiredArguments() == 0 && 8743 AT.matchesType(S.Context, Method->getReturnType())) { 8744 // FIXME: Suggest parens if the expression needs them. 
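// For example (hypothetical call): passing a std::string to a "%s" specifier
// produces a note suggesting the insertion of ".c_str()" after the argument.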
8745 SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc()); 8746 S.Diag(E->getBeginLoc(), diag::note_printf_c_str) 8747 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); 8748 return true; 8749 } 8750 } 8751 8752 return false; 8753 } 8754 8755 bool 8756 CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier 8757 &FS, 8758 const char *startSpecifier, 8759 unsigned specifierLen) { 8760 using namespace analyze_format_string; 8761 using namespace analyze_printf; 8762 8763 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); 8764 8765 if (FS.consumesDataArgument()) { 8766 if (atFirstArg) { 8767 atFirstArg = false; 8768 usesPositionalArgs = FS.usesPositionalArg(); 8769 } 8770 else if (usesPositionalArgs != FS.usesPositionalArg()) { 8771 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 8772 startSpecifier, specifierLen); 8773 return false; 8774 } 8775 } 8776 8777 // First check if the field width, precision, and conversion specifier 8778 // have matching data arguments. 8779 if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, 8780 startSpecifier, specifierLen)) { 8781 return false; 8782 } 8783 8784 if (!HandleAmount(FS.getPrecision(), /* precision */ 1, 8785 startSpecifier, specifierLen)) { 8786 return false; 8787 } 8788 8789 if (!CS.consumesDataArgument()) { 8790 // FIXME: Technically specifying a precision or field width here 8791 // makes no sense. Worth issuing a warning at some point. 8792 return true; 8793 } 8794 8795 // Consume the argument. 8796 unsigned argIndex = FS.getArgIndex(); 8797 if (argIndex < NumDataArgs) { 8798 // The check to see if the argIndex is valid will come later. 8799 // We set the bit here because we may exit early from this 8800 // function if we encounter some other error. 8801 CoveredArgs.set(argIndex); 8802 } 8803 8804 // FreeBSD kernel extensions. 8805 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || 8806 CS.getKind() == ConversionSpecifier::FreeBSDDArg) { 8807 // We need at least two arguments. 8808 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) 8809 return false; 8810 8811 // Claim the second argument. 8812 CoveredArgs.set(argIndex + 1); 8813 8814 // Type check the first argument (int for %b, pointer for %D) 8815 const Expr *Ex = getDataArg(argIndex); 8816 const analyze_printf::ArgType &AT = 8817 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? 8818 ArgType(S.Context.IntTy) : ArgType::CPointerTy; 8819 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) 8820 EmitFormatDiagnostic( 8821 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8822 << AT.getRepresentativeTypeName(S.Context) << Ex->getType() 8823 << false << Ex->getSourceRange(), 8824 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8825 getSpecifierRange(startSpecifier, specifierLen)); 8826 8827 // Type check the second argument (char * for both %b and %D) 8828 Ex = getDataArg(argIndex + 1); 8829 const analyze_printf::ArgType &AT2 = ArgType::CStrTy; 8830 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) 8831 EmitFormatDiagnostic( 8832 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 8833 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() 8834 << false << Ex->getSourceRange(), 8835 Ex->getBeginLoc(), /*IsStringLocation*/ false, 8836 getSpecifierRange(startSpecifier, specifierLen)); 8837 8838 return true; 8839 } 8840 8841 // Check for using an Objective-C specific conversion specifier 8842 // in a non-ObjC literal. 
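// For example (hypothetical call): using "%@" in a plain C printf format
// string is rejected here as an invalid conversion specifier.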
8843 if (!allowsObjCArg() && CS.isObjCArg()) { 8844 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8845 specifierLen); 8846 } 8847 8848 // %P can only be used with os_log. 8849 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) { 8850 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8851 specifierLen); 8852 } 8853 8854 // %n is not allowed with os_log. 8855 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) { 8856 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg), 8857 getLocationOfByte(CS.getStart()), 8858 /*IsStringLocation*/ false, 8859 getSpecifierRange(startSpecifier, specifierLen)); 8860 8861 return true; 8862 } 8863 8864 // Only scalars are allowed for os_trace. 8865 if (FSType == Sema::FST_OSTrace && 8866 (CS.getKind() == ConversionSpecifier::PArg || 8867 CS.getKind() == ConversionSpecifier::sArg || 8868 CS.getKind() == ConversionSpecifier::ObjCObjArg)) { 8869 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, 8870 specifierLen); 8871 } 8872 8873 // Check for use of public/private annotation outside of os_log(). 8874 if (FSType != Sema::FST_OSLog) { 8875 if (FS.isPublic().isSet()) { 8876 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8877 << "public", 8878 getLocationOfByte(FS.isPublic().getPosition()), 8879 /*IsStringLocation*/ false, 8880 getSpecifierRange(startSpecifier, specifierLen)); 8881 } 8882 if (FS.isPrivate().isSet()) { 8883 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation) 8884 << "private", 8885 getLocationOfByte(FS.isPrivate().getPosition()), 8886 /*IsStringLocation*/ false, 8887 getSpecifierRange(startSpecifier, specifierLen)); 8888 } 8889 } 8890 8891 // Check for invalid use of field width 8892 if (!FS.hasValidFieldWidth()) { 8893 HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, 8894 startSpecifier, specifierLen); 8895 } 8896 8897 // Check for invalid use of precision 8898 if (!FS.hasValidPrecision()) { 8899 HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, 8900 startSpecifier, specifierLen); 8901 } 8902 8903 // Precision is mandatory for %P specifier. 8904 if (CS.getKind() == ConversionSpecifier::PArg && 8905 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) { 8906 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision), 8907 getLocationOfByte(startSpecifier), 8908 /*IsStringLocation*/ false, 8909 getSpecifierRange(startSpecifier, specifierLen)); 8910 } 8911 8912 // Check each flag does not conflict with any other component. 
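// For example (hypothetical call): printf("%+s", s) uses a '+' flag that is
// meaningless for 's', so HandleFlag emits a warning with a removal fix-it.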
8913 if (!FS.hasValidThousandsGroupingPrefix()) 8914 HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); 8915 if (!FS.hasValidLeadingZeros()) 8916 HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); 8917 if (!FS.hasValidPlusPrefix()) 8918 HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); 8919 if (!FS.hasValidSpacePrefix()) 8920 HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); 8921 if (!FS.hasValidAlternativeForm()) 8922 HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); 8923 if (!FS.hasValidLeftJustified()) 8924 HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); 8925 8926 // Check that flags are not ignored by another flag 8927 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' 8928 HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), 8929 startSpecifier, specifierLen); 8930 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' 8931 HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), 8932 startSpecifier, specifierLen); 8933 8934 // Check the length modifier is valid with the given conversion specifier. 8935 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 8936 S.getLangOpts())) 8937 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8938 diag::warn_format_nonsensical_length); 8939 else if (!FS.hasStandardLengthModifier()) 8940 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 8941 else if (!FS.hasStandardLengthConversionCombination()) 8942 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 8943 diag::warn_format_non_standard_conversion_spec); 8944 8945 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 8946 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 8947 8948 // The remaining checks depend on the data arguments. 8949 if (HasVAListArg) 8950 return true; 8951 8952 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 8953 return false; 8954 8955 const Expr *Arg = getDataArg(argIndex); 8956 if (!Arg) 8957 return true; 8958 8959 return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); 8960 } 8961 8962 static bool requiresParensToAddCast(const Expr *E) { 8963 // FIXME: We should have a general way to reason about operator 8964 // precedence and whether parens are actually needed here. 8965 // Take care of a few common cases where they aren't. 
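  // For example (illustrative): a plain DeclRefExpr such as 'n' can be cast
  // directly as '(long)n', whereas a binary expression like 'a + b' needs the
  // extra parentheses in '(long)(a + b)'.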
8966 const Expr *Inside = E->IgnoreImpCasts(); 8967 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) 8968 Inside = POE->getSyntacticForm()->IgnoreImpCasts(); 8969 8970 switch (Inside->getStmtClass()) { 8971 case Stmt::ArraySubscriptExprClass: 8972 case Stmt::CallExprClass: 8973 case Stmt::CharacterLiteralClass: 8974 case Stmt::CXXBoolLiteralExprClass: 8975 case Stmt::DeclRefExprClass: 8976 case Stmt::FloatingLiteralClass: 8977 case Stmt::IntegerLiteralClass: 8978 case Stmt::MemberExprClass: 8979 case Stmt::ObjCArrayLiteralClass: 8980 case Stmt::ObjCBoolLiteralExprClass: 8981 case Stmt::ObjCBoxedExprClass: 8982 case Stmt::ObjCDictionaryLiteralClass: 8983 case Stmt::ObjCEncodeExprClass: 8984 case Stmt::ObjCIvarRefExprClass: 8985 case Stmt::ObjCMessageExprClass: 8986 case Stmt::ObjCPropertyRefExprClass: 8987 case Stmt::ObjCStringLiteralClass: 8988 case Stmt::ObjCSubscriptRefExprClass: 8989 case Stmt::ParenExprClass: 8990 case Stmt::StringLiteralClass: 8991 case Stmt::UnaryOperatorClass: 8992 return false; 8993 default: 8994 return true; 8995 } 8996 } 8997 8998 static std::pair<QualType, StringRef> 8999 shouldNotPrintDirectly(const ASTContext &Context, 9000 QualType IntendedTy, 9001 const Expr *E) { 9002 // Use a 'while' to peel off layers of typedefs. 9003 QualType TyTy = IntendedTy; 9004 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { 9005 StringRef Name = UserTy->getDecl()->getName(); 9006 QualType CastTy = llvm::StringSwitch<QualType>(Name) 9007 .Case("CFIndex", Context.getNSIntegerType()) 9008 .Case("NSInteger", Context.getNSIntegerType()) 9009 .Case("NSUInteger", Context.getNSUIntegerType()) 9010 .Case("SInt32", Context.IntTy) 9011 .Case("UInt32", Context.UnsignedIntTy) 9012 .Default(QualType()); 9013 9014 if (!CastTy.isNull()) 9015 return std::make_pair(CastTy, Name); 9016 9017 TyTy = UserTy->desugar(); 9018 } 9019 9020 // Strip parens if necessary. 9021 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) 9022 return shouldNotPrintDirectly(Context, 9023 PE->getSubExpr()->getType(), 9024 PE->getSubExpr()); 9025 9026 // If this is a conditional expression, then its result type is constructed 9027 // via usual arithmetic conversions and thus there might be no necessary 9028 // typedef sugar there. Recurse to operands to check for NSInteger & 9029 // Co. usage condition. 9030 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { 9031 QualType TrueTy, FalseTy; 9032 StringRef TrueName, FalseName; 9033 9034 std::tie(TrueTy, TrueName) = 9035 shouldNotPrintDirectly(Context, 9036 CO->getTrueExpr()->getType(), 9037 CO->getTrueExpr()); 9038 std::tie(FalseTy, FalseName) = 9039 shouldNotPrintDirectly(Context, 9040 CO->getFalseExpr()->getType(), 9041 CO->getFalseExpr()); 9042 9043 if (TrueTy == FalseTy) 9044 return std::make_pair(TrueTy, TrueName); 9045 else if (TrueTy.isNull()) 9046 return std::make_pair(FalseTy, FalseName); 9047 else if (FalseTy.isNull()) 9048 return std::make_pair(TrueTy, TrueName); 9049 } 9050 9051 return std::make_pair(QualType(), StringRef()); 9052 } 9053 9054 /// Return true if \p ICE is an implicit argument promotion of an arithmetic 9055 /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked 9056 /// type do not count. 9057 static bool 9058 isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { 9059 QualType From = ICE->getSubExpr()->getType(); 9060 QualType To = ICE->getType(); 9061 // It's an integer promotion if the destination type is the promoted 9062 // source type. 
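  // For example (illustrative): passing a 'short' to a variadic function
  // inserts an implicit 'short' -> 'int' cast, which counts as a promotion
  // here; 'float' -> 'double' is the corresponding floating-point case.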
9063 if (ICE->getCastKind() == CK_IntegralCast && 9064 From->isPromotableIntegerType() && 9065 S.Context.getPromotedIntegerType(From) == To) 9066 return true; 9067 // Look through vector types, since we do default argument promotion for 9068 // those in OpenCL. 9069 if (const auto *VecTy = From->getAs<ExtVectorType>()) 9070 From = VecTy->getElementType(); 9071 if (const auto *VecTy = To->getAs<ExtVectorType>()) 9072 To = VecTy->getElementType(); 9073 // It's a floating promotion if the source type is a lower rank. 9074 return ICE->getCastKind() == CK_FloatingCast && 9075 S.Context.getFloatingTypeOrder(From, To) < 0; 9076 } 9077 9078 bool 9079 CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9080 const char *StartSpecifier, 9081 unsigned SpecifierLen, 9082 const Expr *E) { 9083 using namespace analyze_format_string; 9084 using namespace analyze_printf; 9085 9086 // Now type check the data expression that matches the 9087 // format specifier. 9088 const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext()); 9089 if (!AT.isValid()) 9090 return true; 9091 9092 QualType ExprTy = E->getType(); 9093 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { 9094 ExprTy = TET->getUnderlyingExpr()->getType(); 9095 } 9096 9097 // Diagnose attempts to print a boolean value as a character. Unlike other 9098 // -Wformat diagnostics, this is fine from a type perspective, but it still 9099 // doesn't make sense. 9100 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg && 9101 E->isKnownToHaveBooleanValue()) { 9102 const CharSourceRange &CSR = 9103 getSpecifierRange(StartSpecifier, SpecifierLen); 9104 SmallString<4> FSString; 9105 llvm::raw_svector_ostream os(FSString); 9106 FS.toString(os); 9107 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character) 9108 << FSString, 9109 E->getExprLoc(), false, CSR); 9110 return true; 9111 } 9112 9113 analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy); 9114 if (Match == analyze_printf::ArgType::Match) 9115 return true; 9116 9117 // Look through argument promotions for our error message's reported type. 9118 // This includes the integral and floating promotions, but excludes array 9119 // and function pointer decay (seeing that an argument intended to be a 9120 // string has type 'char [6]' is probably more confusing than 'char *') and 9121 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type). 9122 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 9123 if (isArithmeticArgumentPromotion(S, ICE)) { 9124 E = ICE->getSubExpr(); 9125 ExprTy = E->getType(); 9126 9127 // Check if we didn't match because of an implicit cast from a 'char' 9128 // or 'short' to an 'int'. This is done because printf is a varargs 9129 // function. 9130 if (ICE->getType() == S.Context.IntTy || 9131 ICE->getType() == S.Context.UnsignedIntTy) { 9132 // All further checking is done on the subexpression 9133 const analyze_printf::ArgType::MatchKind ImplicitMatch = 9134 AT.matchesType(S.Context, ExprTy); 9135 if (ImplicitMatch == analyze_printf::ArgType::Match) 9136 return true; 9137 if (ImplicitMatch == ArgType::NoMatchPedantic || 9138 ImplicitMatch == ArgType::NoMatchTypeConfusion) 9139 Match = ImplicitMatch; 9140 } 9141 } 9142 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { 9143 // Special case for 'a', which has type 'int' in C. 
9144 // Note, however, that we do /not/ want to treat multibyte constants like 9145 // 'MooV' as characters! This form is deprecated but still exists. In 9146 // addition, don't treat expressions as of type 'char' if one byte length 9147 // modifier is provided. 9148 if (ExprTy == S.Context.IntTy && 9149 FS.getLengthModifier().getKind() != LengthModifier::AsChar) 9150 if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) 9151 ExprTy = S.Context.CharTy; 9152 } 9153 9154 // Look through enums to their underlying type. 9155 bool IsEnum = false; 9156 if (auto EnumTy = ExprTy->getAs<EnumType>()) { 9157 ExprTy = EnumTy->getDecl()->getIntegerType(); 9158 IsEnum = true; 9159 } 9160 9161 // %C in an Objective-C context prints a unichar, not a wchar_t. 9162 // If the argument is an integer of some kind, believe the %C and suggest 9163 // a cast instead of changing the conversion specifier. 9164 QualType IntendedTy = ExprTy; 9165 if (isObjCContext() && 9166 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { 9167 if (ExprTy->isIntegralOrUnscopedEnumerationType() && 9168 !ExprTy->isCharType()) { 9169 // 'unichar' is defined as a typedef of unsigned short, but we should 9170 // prefer using the typedef if it is visible. 9171 IntendedTy = S.Context.UnsignedShortTy; 9172 9173 // While we are here, check if the value is an IntegerLiteral that happens 9174 // to be within the valid range. 9175 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { 9176 const llvm::APInt &V = IL->getValue(); 9177 if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) 9178 return true; 9179 } 9180 9181 LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(), 9182 Sema::LookupOrdinaryName); 9183 if (S.LookupName(Result, S.getCurScope())) { 9184 NamedDecl *ND = Result.getFoundDecl(); 9185 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) 9186 if (TD->getUnderlyingType() == IntendedTy) 9187 IntendedTy = S.Context.getTypedefType(TD); 9188 } 9189 } 9190 } 9191 9192 // Special-case some of Darwin's platform-independence types by suggesting 9193 // casts to primitive types that are known to be large enough. 9194 bool ShouldNotPrintDirectly = false; StringRef CastTyName; 9195 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { 9196 QualType CastTy; 9197 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); 9198 if (!CastTy.isNull()) { 9199 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int 9200 // (long in ASTContext). Only complain to pedants. 9201 if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") && 9202 (AT.isSizeT() || AT.isPtrdiffT()) && 9203 AT.matchesType(S.Context, CastTy)) 9204 Match = ArgType::NoMatchPedantic; 9205 IntendedTy = CastTy; 9206 ShouldNotPrintDirectly = true; 9207 } 9208 } 9209 9210 // We may be able to offer a FixItHint if it is a supported type. 
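  // Illustrative example: for printf("%d", someLongValue) the specifier can
  // be rewritten to "%ld", so a FixItHint replacing the specifier is offered
  // instead of only reporting the mismatch.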
9211 PrintfSpecifier fixedFS = FS; 9212 bool Success = 9213 fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext()); 9214 9215 if (Success) { 9216 // Get the fix string from the fixed format specifier 9217 SmallString<16> buf; 9218 llvm::raw_svector_ostream os(buf); 9219 fixedFS.toString(os); 9220 9221 CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); 9222 9223 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { 9224 unsigned Diag; 9225 switch (Match) { 9226 case ArgType::Match: llvm_unreachable("expected non-matching"); 9227 case ArgType::NoMatchPedantic: 9228 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9229 break; 9230 case ArgType::NoMatchTypeConfusion: 9231 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9232 break; 9233 case ArgType::NoMatch: 9234 Diag = diag::warn_format_conversion_argument_type_mismatch; 9235 break; 9236 } 9237 9238 // In this case, the specifier is wrong and should be changed to match 9239 // the argument. 9240 EmitFormatDiagnostic(S.PDiag(Diag) 9241 << AT.getRepresentativeTypeName(S.Context) 9242 << IntendedTy << IsEnum << E->getSourceRange(), 9243 E->getBeginLoc(), 9244 /*IsStringLocation*/ false, SpecRange, 9245 FixItHint::CreateReplacement(SpecRange, os.str())); 9246 } else { 9247 // The canonical type for formatting this value is different from the 9248 // actual type of the expression. (This occurs, for example, with Darwin's 9249 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but 9250 // should be printed as 'long' for 64-bit compatibility.) 9251 // Rather than emitting a normal format/argument mismatch, we want to 9252 // add a cast to the recommended type (and correct the format string 9253 // if necessary). 9254 SmallString<16> CastBuf; 9255 llvm::raw_svector_ostream CastFix(CastBuf); 9256 CastFix << "("; 9257 IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); 9258 CastFix << ")"; 9259 9260 SmallVector<FixItHint,4> Hints; 9261 if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) 9262 Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); 9263 9264 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { 9265 // If there's already a cast present, just replace it. 9266 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); 9267 Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); 9268 9269 } else if (!requiresParensToAddCast(E)) { 9270 // If the expression has high enough precedence, 9271 // just write the C-style cast. 9272 Hints.push_back( 9273 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9274 } else { 9275 // Otherwise, add parens around the expression as well as the cast. 9276 CastFix << "("; 9277 Hints.push_back( 9278 FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str())); 9279 9280 SourceLocation After = S.getLocForEndOfToken(E->getEndLoc()); 9281 Hints.push_back(FixItHint::CreateInsertion(After, ")")); 9282 } 9283 9284 if (ShouldNotPrintDirectly) { 9285 // The expression has a type that should not be printed directly. 9286 // We extract the name from the typedef because we don't want to show 9287 // the underlying type in the diagnostic. 9288 StringRef Name; 9289 if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) 9290 Name = TypedefTy->getDecl()->getName(); 9291 else 9292 Name = CastTyName; 9293 unsigned Diag = Match == ArgType::NoMatchPedantic 9294 ? 
diag::warn_format_argument_needs_cast_pedantic 9295 : diag::warn_format_argument_needs_cast; 9296 EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum 9297 << E->getSourceRange(), 9298 E->getBeginLoc(), /*IsStringLocation=*/false, 9299 SpecRange, Hints); 9300 } else { 9301 // In this case, the expression could be printed using a different 9302 // specifier, but we've decided that the specifier is probably correct 9303 // and we should cast instead. Just use the normal warning message. 9304 EmitFormatDiagnostic( 9305 S.PDiag(diag::warn_format_conversion_argument_type_mismatch) 9306 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum 9307 << E->getSourceRange(), 9308 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints); 9309 } 9310 } 9311 } else { 9312 const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, 9313 SpecifierLen); 9314 // Since the warning for passing non-POD types to variadic functions 9315 // was deferred until now, we emit a warning for non-POD 9316 // arguments here. 9317 switch (S.isValidVarArgType(ExprTy)) { 9318 case Sema::VAK_Valid: 9319 case Sema::VAK_ValidInCXX11: { 9320 unsigned Diag; 9321 switch (Match) { 9322 case ArgType::Match: llvm_unreachable("expected non-matching"); 9323 case ArgType::NoMatchPedantic: 9324 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; 9325 break; 9326 case ArgType::NoMatchTypeConfusion: 9327 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion; 9328 break; 9329 case ArgType::NoMatch: 9330 Diag = diag::warn_format_conversion_argument_type_mismatch; 9331 break; 9332 } 9333 9334 EmitFormatDiagnostic( 9335 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy 9336 << IsEnum << CSR << E->getSourceRange(), 9337 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9338 break; 9339 } 9340 case Sema::VAK_Undefined: 9341 case Sema::VAK_MSVCUndefined: 9342 EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string) 9343 << S.getLangOpts().CPlusPlus11 << ExprTy 9344 << CallType 9345 << AT.getRepresentativeTypeName(S.Context) << CSR 9346 << E->getSourceRange(), 9347 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9348 checkForCStrMembers(AT, E); 9349 break; 9350 9351 case Sema::VAK_Invalid: 9352 if (ExprTy->isObjCObjectType()) 9353 EmitFormatDiagnostic( 9354 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) 9355 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType 9356 << AT.getRepresentativeTypeName(S.Context) << CSR 9357 << E->getSourceRange(), 9358 E->getBeginLoc(), /*IsStringLocation*/ false, CSR); 9359 else 9360 // FIXME: If this is an initializer list, suggest removing the braces 9361 // or inserting a cast to the target type. 
9362 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format) 9363 << isa<InitListExpr>(E) << ExprTy << CallType 9364 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); 9365 break; 9366 } 9367 9368 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && 9369 "format string specifier index out of range"); 9370 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; 9371 } 9372 9373 return true; 9374 } 9375 9376 //===--- CHECK: Scanf format string checking ------------------------------===// 9377 9378 namespace { 9379 9380 class CheckScanfHandler : public CheckFormatHandler { 9381 public: 9382 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr, 9383 const Expr *origFormatExpr, Sema::FormatStringType type, 9384 unsigned firstDataArg, unsigned numDataArgs, 9385 const char *beg, bool hasVAListArg, 9386 ArrayRef<const Expr *> Args, unsigned formatIdx, 9387 bool inFunctionCall, Sema::VariadicCallType CallType, 9388 llvm::SmallBitVector &CheckedVarArgs, 9389 UncoveredArgHandler &UncoveredArg) 9390 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 9391 numDataArgs, beg, hasVAListArg, Args, formatIdx, 9392 inFunctionCall, CallType, CheckedVarArgs, 9393 UncoveredArg) {} 9394 9395 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, 9396 const char *startSpecifier, 9397 unsigned specifierLen) override; 9398 9399 bool HandleInvalidScanfConversionSpecifier( 9400 const analyze_scanf::ScanfSpecifier &FS, 9401 const char *startSpecifier, 9402 unsigned specifierLen) override; 9403 9404 void HandleIncompleteScanList(const char *start, const char *end) override; 9405 }; 9406 9407 } // namespace 9408 9409 void CheckScanfHandler::HandleIncompleteScanList(const char *start, 9410 const char *end) { 9411 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), 9412 getLocationOfByte(end), /*IsStringLocation*/true, 9413 getSpecifierRange(start, end - start)); 9414 } 9415 9416 bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( 9417 const analyze_scanf::ScanfSpecifier &FS, 9418 const char *startSpecifier, 9419 unsigned specifierLen) { 9420 const analyze_scanf::ScanfConversionSpecifier &CS = 9421 FS.getConversionSpecifier(); 9422 9423 return HandleInvalidConversionSpecifier(FS.getArgIndex(), 9424 getLocationOfByte(CS.getStart()), 9425 startSpecifier, specifierLen, 9426 CS.getStart(), CS.getLength()); 9427 } 9428 9429 bool CheckScanfHandler::HandleScanfSpecifier( 9430 const analyze_scanf::ScanfSpecifier &FS, 9431 const char *startSpecifier, 9432 unsigned specifierLen) { 9433 using namespace analyze_scanf; 9434 using namespace analyze_format_string; 9435 9436 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); 9437 9438 // Handle case where '%' and '*' don't consume an argument. These shouldn't 9439 // be used to decide if we are using positional arguments consistently. 9440 if (FS.consumesDataArgument()) { 9441 if (atFirstArg) { 9442 atFirstArg = false; 9443 usesPositionalArgs = FS.usesPositionalArg(); 9444 } 9445 else if (usesPositionalArgs != FS.usesPositionalArg()) { 9446 HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), 9447 startSpecifier, specifierLen); 9448 return false; 9449 } 9450 } 9451 9452 // Check if the field with is non-zero. 
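  // For example (illustrative): scanf("%0d", &n) specifies a constant field
  // width of zero, which is diagnosed below with a fix-it that removes it.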
9453 const OptionalAmount &Amt = FS.getFieldWidth(); 9454 if (Amt.getHowSpecified() == OptionalAmount::Constant) { 9455 if (Amt.getConstantAmount() == 0) { 9456 const CharSourceRange &R = getSpecifierRange(Amt.getStart(), 9457 Amt.getConstantLength()); 9458 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), 9459 getLocationOfByte(Amt.getStart()), 9460 /*IsStringLocation*/true, R, 9461 FixItHint::CreateRemoval(R)); 9462 } 9463 } 9464 9465 if (!FS.consumesDataArgument()) { 9466 // FIXME: Technically specifying a precision or field width here 9467 // makes no sense. Worth issuing a warning at some point. 9468 return true; 9469 } 9470 9471 // Consume the argument. 9472 unsigned argIndex = FS.getArgIndex(); 9473 if (argIndex < NumDataArgs) { 9474 // The check to see if the argIndex is valid will come later. 9475 // We set the bit here because we may exit early from this 9476 // function if we encounter some other error. 9477 CoveredArgs.set(argIndex); 9478 } 9479 9480 // Check the length modifier is valid with the given conversion specifier. 9481 if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), 9482 S.getLangOpts())) 9483 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9484 diag::warn_format_nonsensical_length); 9485 else if (!FS.hasStandardLengthModifier()) 9486 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); 9487 else if (!FS.hasStandardLengthConversionCombination()) 9488 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, 9489 diag::warn_format_non_standard_conversion_spec); 9490 9491 if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) 9492 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); 9493 9494 // The remaining checks depend on the data arguments. 9495 if (HasVAListArg) 9496 return true; 9497 9498 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) 9499 return false; 9500 9501 // Check that the argument type matches the format specifier. 9502 const Expr *Ex = getDataArg(argIndex); 9503 if (!Ex) 9504 return true; 9505 9506 const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); 9507 9508 if (!AT.isValid()) { 9509 return true; 9510 } 9511 9512 analyze_format_string::ArgType::MatchKind Match = 9513 AT.matchesType(S.Context, Ex->getType()); 9514 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic; 9515 if (Match == analyze_format_string::ArgType::Match) 9516 return true; 9517 9518 ScanfSpecifier fixedFS = FS; 9519 bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), 9520 S.getLangOpts(), S.Context); 9521 9522 unsigned Diag = 9523 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic 9524 : diag::warn_format_conversion_argument_type_mismatch; 9525 9526 if (Success) { 9527 // Get the fix string from the fixed format specifier. 
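    // Illustrative example: for scanf("%d", &someLongVariable) the fixed
    // specifier would be "%ld", offered as a fix-it replacement below.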
9528 SmallString<128> buf; 9529 llvm::raw_svector_ostream os(buf); 9530 fixedFS.toString(os); 9531 9532 EmitFormatDiagnostic( 9533 S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) 9534 << Ex->getType() << false << Ex->getSourceRange(), 9535 Ex->getBeginLoc(), 9536 /*IsStringLocation*/ false, 9537 getSpecifierRange(startSpecifier, specifierLen), 9538 FixItHint::CreateReplacement( 9539 getSpecifierRange(startSpecifier, specifierLen), os.str())); 9540 } else { 9541 EmitFormatDiagnostic(S.PDiag(Diag) 9542 << AT.getRepresentativeTypeName(S.Context) 9543 << Ex->getType() << false << Ex->getSourceRange(), 9544 Ex->getBeginLoc(), 9545 /*IsStringLocation*/ false, 9546 getSpecifierRange(startSpecifier, specifierLen)); 9547 } 9548 9549 return true; 9550 } 9551 9552 static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr, 9553 const Expr *OrigFormatExpr, 9554 ArrayRef<const Expr *> Args, 9555 bool HasVAListArg, unsigned format_idx, 9556 unsigned firstDataArg, 9557 Sema::FormatStringType Type, 9558 bool inFunctionCall, 9559 Sema::VariadicCallType CallType, 9560 llvm::SmallBitVector &CheckedVarArgs, 9561 UncoveredArgHandler &UncoveredArg, 9562 bool IgnoreStringsWithoutSpecifiers) { 9563 // CHECK: is the format string a wide literal? 9564 if (!FExpr->isAscii() && !FExpr->isUTF8()) { 9565 CheckFormatHandler::EmitFormatDiagnostic( 9566 S, inFunctionCall, Args[format_idx], 9567 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(), 9568 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9569 return; 9570 } 9571 9572 // Str - The format string. NOTE: this is NOT null-terminated! 9573 StringRef StrRef = FExpr->getString(); 9574 const char *Str = StrRef.data(); 9575 // Account for cases where the string literal is truncated in a declaration. 9576 const ConstantArrayType *T = 9577 S.Context.getAsConstantArrayType(FExpr->getType()); 9578 assert(T && "String literal not of constant array type!"); 9579 size_t TypeSize = T->getSize().getZExtValue(); 9580 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9581 const unsigned numDataArgs = Args.size() - firstDataArg; 9582 9583 if (IgnoreStringsWithoutSpecifiers && 9584 !analyze_format_string::parseFormatStringHasFormattingSpecifiers( 9585 Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 9586 return; 9587 9588 // Emit a warning if the string literal is truncated and does not contain an 9589 // embedded null character. 9590 if (TypeSize <= StrRef.size() && 9591 StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { 9592 CheckFormatHandler::EmitFormatDiagnostic( 9593 S, inFunctionCall, Args[format_idx], 9594 S.PDiag(diag::warn_printf_format_string_not_null_terminated), 9595 FExpr->getBeginLoc(), 9596 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); 9597 return; 9598 } 9599 9600 // CHECK: empty format string? 
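  // For example (illustrative): printf("", value) supplies a data argument
  // but an empty format string, which is diagnosed below.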
9601 if (StrLen == 0 && numDataArgs > 0) { 9602 CheckFormatHandler::EmitFormatDiagnostic( 9603 S, inFunctionCall, Args[format_idx], 9604 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 9605 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 9606 return; 9607 } 9608 9609 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 9610 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog || 9611 Type == Sema::FST_OSTrace) { 9612 CheckPrintfHandler H( 9613 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 9614 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, 9615 HasVAListArg, Args, format_idx, inFunctionCall, CallType, 9616 CheckedVarArgs, UncoveredArg); 9617 9618 if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, 9619 S.getLangOpts(), 9620 S.Context.getTargetInfo(), 9621 Type == Sema::FST_FreeBSDKPrintf)) 9622 H.DoneProcessing(); 9623 } else if (Type == Sema::FST_Scanf) { 9624 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 9625 numDataArgs, Str, HasVAListArg, Args, format_idx, 9626 inFunctionCall, CallType, CheckedVarArgs, UncoveredArg); 9627 9628 if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, 9629 S.getLangOpts(), 9630 S.Context.getTargetInfo())) 9631 H.DoneProcessing(); 9632 } // TODO: handle other formats 9633 } 9634 9635 bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 9636 // Str - The format string. NOTE: this is NOT null-terminated! 9637 StringRef StrRef = FExpr->getString(); 9638 const char *Str = StrRef.data(); 9639 // Account for cases where the string literal is truncated in a declaration. 9640 const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); 9641 assert(T && "String literal not of constant array type!"); 9642 size_t TypeSize = T->getSize().getZExtValue(); 9643 size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); 9644 return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, 9645 getLangOpts(), 9646 Context.getTargetInfo()); 9647 } 9648 9649 //===--- CHECK: Warn on use of wrong absolute value function. -------------===// 9650 9651 // Returns the related absolute value function that is larger, or 0 if one 9652 // does not exist.
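// For example (illustrative): abs() maps to labs(), labs() to llabs(), and
// llabs() has no larger sibling, so it maps to 0.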
9653 static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { 9654 switch (AbsFunction) { 9655 default: 9656 return 0; 9657 9658 case Builtin::BI__builtin_abs: 9659 return Builtin::BI__builtin_labs; 9660 case Builtin::BI__builtin_labs: 9661 return Builtin::BI__builtin_llabs; 9662 case Builtin::BI__builtin_llabs: 9663 return 0; 9664 9665 case Builtin::BI__builtin_fabsf: 9666 return Builtin::BI__builtin_fabs; 9667 case Builtin::BI__builtin_fabs: 9668 return Builtin::BI__builtin_fabsl; 9669 case Builtin::BI__builtin_fabsl: 9670 return 0; 9671 9672 case Builtin::BI__builtin_cabsf: 9673 return Builtin::BI__builtin_cabs; 9674 case Builtin::BI__builtin_cabs: 9675 return Builtin::BI__builtin_cabsl; 9676 case Builtin::BI__builtin_cabsl: 9677 return 0; 9678 9679 case Builtin::BIabs: 9680 return Builtin::BIlabs; 9681 case Builtin::BIlabs: 9682 return Builtin::BIllabs; 9683 case Builtin::BIllabs: 9684 return 0; 9685 9686 case Builtin::BIfabsf: 9687 return Builtin::BIfabs; 9688 case Builtin::BIfabs: 9689 return Builtin::BIfabsl; 9690 case Builtin::BIfabsl: 9691 return 0; 9692 9693 case Builtin::BIcabsf: 9694 return Builtin::BIcabs; 9695 case Builtin::BIcabs: 9696 return Builtin::BIcabsl; 9697 case Builtin::BIcabsl: 9698 return 0; 9699 } 9700 } 9701 9702 // Returns the argument type of the absolute value function. 9703 static QualType getAbsoluteValueArgumentType(ASTContext &Context, 9704 unsigned AbsType) { 9705 if (AbsType == 0) 9706 return QualType(); 9707 9708 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; 9709 QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); 9710 if (Error != ASTContext::GE_None) 9711 return QualType(); 9712 9713 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); 9714 if (!FT) 9715 return QualType(); 9716 9717 if (FT->getNumParams() != 1) 9718 return QualType(); 9719 9720 return FT->getParamType(0); 9721 } 9722 9723 // Returns the best absolute value function, or zero, based on type and 9724 // current absolute value function. 9725 static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, 9726 unsigned AbsFunctionKind) { 9727 unsigned BestKind = 0; 9728 uint64_t ArgSize = Context.getTypeSize(ArgType); 9729 for (unsigned Kind = AbsFunctionKind; Kind != 0; 9730 Kind = getLargerAbsoluteValueFunction(Kind)) { 9731 QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); 9732 if (Context.getTypeSize(ParamType) >= ArgSize) { 9733 if (BestKind == 0) 9734 BestKind = Kind; 9735 else if (Context.hasSameType(ParamType, ArgType)) { 9736 BestKind = Kind; 9737 break; 9738 } 9739 } 9740 } 9741 return BestKind; 9742 } 9743 9744 enum AbsoluteValueKind { 9745 AVK_Integer, 9746 AVK_Floating, 9747 AVK_Complex 9748 }; 9749 9750 static AbsoluteValueKind getAbsoluteValueKind(QualType T) { 9751 if (T->isIntegralOrEnumerationType()) 9752 return AVK_Integer; 9753 if (T->isRealFloatingType()) 9754 return AVK_Floating; 9755 if (T->isAnyComplexType()) 9756 return AVK_Complex; 9757 9758 llvm_unreachable("Type not integer, floating, or complex"); 9759 } 9760 9761 // Changes the absolute value function to a different type. Preserves whether 9762 // the function is a builtin. 
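// For example (illustrative): with an integer argument, fabsf() is mapped to
// abs(), and the builtin __builtin_fabsf() is mapped to __builtin_abs().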
9763 static unsigned changeAbsFunction(unsigned AbsKind, 9764 AbsoluteValueKind ValueKind) { 9765 switch (ValueKind) { 9766 case AVK_Integer: 9767 switch (AbsKind) { 9768 default: 9769 return 0; 9770 case Builtin::BI__builtin_fabsf: 9771 case Builtin::BI__builtin_fabs: 9772 case Builtin::BI__builtin_fabsl: 9773 case Builtin::BI__builtin_cabsf: 9774 case Builtin::BI__builtin_cabs: 9775 case Builtin::BI__builtin_cabsl: 9776 return Builtin::BI__builtin_abs; 9777 case Builtin::BIfabsf: 9778 case Builtin::BIfabs: 9779 case Builtin::BIfabsl: 9780 case Builtin::BIcabsf: 9781 case Builtin::BIcabs: 9782 case Builtin::BIcabsl: 9783 return Builtin::BIabs; 9784 } 9785 case AVK_Floating: 9786 switch (AbsKind) { 9787 default: 9788 return 0; 9789 case Builtin::BI__builtin_abs: 9790 case Builtin::BI__builtin_labs: 9791 case Builtin::BI__builtin_llabs: 9792 case Builtin::BI__builtin_cabsf: 9793 case Builtin::BI__builtin_cabs: 9794 case Builtin::BI__builtin_cabsl: 9795 return Builtin::BI__builtin_fabsf; 9796 case Builtin::BIabs: 9797 case Builtin::BIlabs: 9798 case Builtin::BIllabs: 9799 case Builtin::BIcabsf: 9800 case Builtin::BIcabs: 9801 case Builtin::BIcabsl: 9802 return Builtin::BIfabsf; 9803 } 9804 case AVK_Complex: 9805 switch (AbsKind) { 9806 default: 9807 return 0; 9808 case Builtin::BI__builtin_abs: 9809 case Builtin::BI__builtin_labs: 9810 case Builtin::BI__builtin_llabs: 9811 case Builtin::BI__builtin_fabsf: 9812 case Builtin::BI__builtin_fabs: 9813 case Builtin::BI__builtin_fabsl: 9814 return Builtin::BI__builtin_cabsf; 9815 case Builtin::BIabs: 9816 case Builtin::BIlabs: 9817 case Builtin::BIllabs: 9818 case Builtin::BIfabsf: 9819 case Builtin::BIfabs: 9820 case Builtin::BIfabsl: 9821 return Builtin::BIcabsf; 9822 } 9823 } 9824 llvm_unreachable("Unable to convert function"); 9825 } 9826 9827 static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 9828 const IdentifierInfo *FnInfo = FDecl->getIdentifier(); 9829 if (!FnInfo) 9830 return 0; 9831 9832 switch (FDecl->getBuiltinID()) { 9833 default: 9834 return 0; 9835 case Builtin::BI__builtin_abs: 9836 case Builtin::BI__builtin_fabs: 9837 case Builtin::BI__builtin_fabsf: 9838 case Builtin::BI__builtin_fabsl: 9839 case Builtin::BI__builtin_labs: 9840 case Builtin::BI__builtin_llabs: 9841 case Builtin::BI__builtin_cabs: 9842 case Builtin::BI__builtin_cabsf: 9843 case Builtin::BI__builtin_cabsl: 9844 case Builtin::BIabs: 9845 case Builtin::BIlabs: 9846 case Builtin::BIllabs: 9847 case Builtin::BIfabs: 9848 case Builtin::BIfabsf: 9849 case Builtin::BIfabsl: 9850 case Builtin::BIcabs: 9851 case Builtin::BIcabsf: 9852 case Builtin::BIcabsl: 9853 return FDecl->getBuiltinID(); 9854 } 9855 llvm_unreachable("Unknown Builtin type"); 9856 } 9857 9858 // If the replacement is valid, emit a note with replacement function. 9859 // Additionally, suggest including the proper header if not already included. 
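// Illustrative example: in C++, calling abs() on a double yields a note
// suggesting std::abs (and including <cmath> if no suitable overload is
// visible); in C the suggestion would be fabs() from <math.h>.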
9860 static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, 9861 unsigned AbsKind, QualType ArgType) { 9862 bool EmitHeaderHint = true; 9863 const char *HeaderName = nullptr; 9864 const char *FunctionName = nullptr; 9865 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { 9866 FunctionName = "std::abs"; 9867 if (ArgType->isIntegralOrEnumerationType()) { 9868 HeaderName = "cstdlib"; 9869 } else if (ArgType->isRealFloatingType()) { 9870 HeaderName = "cmath"; 9871 } else { 9872 llvm_unreachable("Invalid Type"); 9873 } 9874 9875 // Lookup all std::abs 9876 if (NamespaceDecl *Std = S.getStdNamespace()) { 9877 LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); 9878 R.suppressDiagnostics(); 9879 S.LookupQualifiedName(R, Std); 9880 9881 for (const auto *I : R) { 9882 const FunctionDecl *FDecl = nullptr; 9883 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { 9884 FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); 9885 } else { 9886 FDecl = dyn_cast<FunctionDecl>(I); 9887 } 9888 if (!FDecl) 9889 continue; 9890 9891 // Found std::abs(), check that they are the right ones. 9892 if (FDecl->getNumParams() != 1) 9893 continue; 9894 9895 // Check that the parameter type can handle the argument. 9896 QualType ParamType = FDecl->getParamDecl(0)->getType(); 9897 if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && 9898 S.Context.getTypeSize(ArgType) <= 9899 S.Context.getTypeSize(ParamType)) { 9900 // Found a function, don't need the header hint. 9901 EmitHeaderHint = false; 9902 break; 9903 } 9904 } 9905 } 9906 } else { 9907 FunctionName = S.Context.BuiltinInfo.getName(AbsKind); 9908 HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); 9909 9910 if (HeaderName) { 9911 DeclarationName DN(&S.Context.Idents.get(FunctionName)); 9912 LookupResult R(S, DN, Loc, Sema::LookupAnyName); 9913 R.suppressDiagnostics(); 9914 S.LookupName(R, S.getCurScope()); 9915 9916 if (R.isSingleResult()) { 9917 FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); 9918 if (FD && FD->getBuiltinID() == AbsKind) { 9919 EmitHeaderHint = false; 9920 } else { 9921 return; 9922 } 9923 } else if (!R.empty()) { 9924 return; 9925 } 9926 } 9927 } 9928 9929 S.Diag(Loc, diag::note_replace_abs_function) 9930 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); 9931 9932 if (!HeaderName) 9933 return; 9934 9935 if (!EmitHeaderHint) 9936 return; 9937 9938 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName 9939 << FunctionName; 9940 } 9941 9942 template <std::size_t StrLen> 9943 static bool IsStdFunction(const FunctionDecl *FDecl, 9944 const char (&Str)[StrLen]) { 9945 if (!FDecl) 9946 return false; 9947 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str)) 9948 return false; 9949 if (!FDecl->isInStdNamespace()) 9950 return false; 9951 9952 return true; 9953 } 9954 9955 // Warn when using the wrong abs() function. 9956 void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, 9957 const FunctionDecl *FDecl) { 9958 if (Call->getNumArgs() != 1) 9959 return; 9960 9961 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); 9962 bool IsStdAbs = IsStdFunction(FDecl, "abs"); 9963 if (AbsKind == 0 && !IsStdAbs) 9964 return; 9965 9966 QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 9967 QualType ParamType = Call->getArg(0)->getType(); 9968 9969 // Unsigned types cannot be negative. Suggest removing the absolute value 9970 // function call. 
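  // For example (illustrative): abs(u) where 'u' is unsigned can never change
  // the value, so the warning below suggests dropping the call entirely.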
9971 if (ArgType->isUnsignedIntegerType()) { 9972 const char *FunctionName = 9973 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind); 9974 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; 9975 Diag(Call->getExprLoc(), diag::note_remove_abs) 9976 << FunctionName 9977 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); 9978 return; 9979 } 9980 9981 // Taking the absolute value of a pointer is very suspicious, they probably 9982 // wanted to index into an array, dereference a pointer, call a function, etc. 9983 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) { 9984 unsigned DiagType = 0; 9985 if (ArgType->isFunctionType()) 9986 DiagType = 1; 9987 else if (ArgType->isArrayType()) 9988 DiagType = 2; 9989 9990 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType; 9991 return; 9992 } 9993 9994 // std::abs has overloads which prevent most of the absolute value problems 9995 // from occurring. 9996 if (IsStdAbs) 9997 return; 9998 9999 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); 10000 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); 10001 10002 // The argument and parameter are the same kind. Check if they are the right 10003 // size. 10004 if (ArgValueKind == ParamValueKind) { 10005 if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) 10006 return; 10007 10008 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); 10009 Diag(Call->getExprLoc(), diag::warn_abs_too_small) 10010 << FDecl << ArgType << ParamType; 10011 10012 if (NewAbsKind == 0) 10013 return; 10014 10015 emitReplacement(*this, Call->getExprLoc(), 10016 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10017 return; 10018 } 10019 10020 // ArgValueKind != ParamValueKind 10021 // The wrong type of absolute value function was used. Attempt to find the 10022 // proper one. 10023 unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); 10024 NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); 10025 if (NewAbsKind == 0) 10026 return; 10027 10028 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) 10029 << FDecl << ParamValueKind << ArgValueKind; 10030 10031 emitReplacement(*this, Call->getExprLoc(), 10032 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); 10033 } 10034 10035 //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// 10036 void Sema::CheckMaxUnsignedZero(const CallExpr *Call, 10037 const FunctionDecl *FDecl) { 10038 if (!Call || !FDecl) return; 10039 10040 // Ignore template specializations and macros. 10041 if (inTemplateInstantiation()) return; 10042 if (Call->getExprLoc().isMacroID()) return; 10043 10044 // Only care about the one template argument, two function parameter std::max 10045 if (Call->getNumArgs() != 2) return; 10046 if (!IsStdFunction(FDecl, "max")) return; 10047 const auto * ArgList = FDecl->getTemplateSpecializationArgs(); 10048 if (!ArgList) return; 10049 if (ArgList->size() != 1) return; 10050 10051 // Check that template type argument is unsigned integer. 10052 const auto& TA = ArgList->get(0); 10053 if (TA.getKind() != TemplateArgument::Type) return; 10054 QualType ArgType = TA.getAsType(); 10055 if (!ArgType->isUnsignedIntegerType()) return; 10056 10057 // See if either argument is a literal zero. 
10058 auto IsLiteralZeroArg = [](const Expr* E) -> bool { 10059 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E); 10060 if (!MTE) return false; 10061 const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr()); 10062 if (!Num) return false; 10063 if (Num->getValue() != 0) return false; 10064 return true; 10065 }; 10066 10067 const Expr *FirstArg = Call->getArg(0); 10068 const Expr *SecondArg = Call->getArg(1); 10069 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg); 10070 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); 10071 10072 // Only warn when exactly one argument is zero. 10073 if (IsFirstArgZero == IsSecondArgZero) return; 10074 10075 SourceRange FirstRange = FirstArg->getSourceRange(); 10076 SourceRange SecondRange = SecondArg->getSourceRange(); 10077 10078 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange; 10079 10080 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero) 10081 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange; 10082 10083 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)". 10084 SourceRange RemovalRange; 10085 if (IsFirstArgZero) { 10086 RemovalRange = SourceRange(FirstRange.getBegin(), 10087 SecondRange.getBegin().getLocWithOffset(-1)); 10088 } else { 10089 RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()), 10090 SecondRange.getEnd()); 10091 } 10092 10093 Diag(Call->getExprLoc(), diag::note_remove_max_call) 10094 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) 10095 << FixItHint::CreateRemoval(RemovalRange); 10096 } 10097 10098 //===--- CHECK: Standard memory functions ---------------------------------===// 10099 10100 /// Takes the expression passed to the size_t parameter of functions 10101 /// such as memcmp, strncat, etc and warns if it's a comparison. 10102 /// 10103 /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. 10104 static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, 10105 IdentifierInfo *FnName, 10106 SourceLocation FnLoc, 10107 SourceLocation RParenLoc) { 10108 const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); 10109 if (!Size) 10110 return false; 10111 10112 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||: 10113 if (!Size->isComparisonOp() && !Size->isLogicalOp()) 10114 return false; 10115 10116 SourceRange SizeRange = Size->getSourceRange(); 10117 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) 10118 << SizeRange << FnName; 10119 S.Diag(FnLoc, diag::note_memsize_comparison_paren) 10120 << FnName 10121 << FixItHint::CreateInsertion( 10122 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")") 10123 << FixItHint::CreateRemoval(RParenLoc); 10124 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) 10125 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") 10126 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), 10127 ")"); 10128 10129 return true; 10130 } 10131 10132 /// Determine whether the given type is or contains a dynamic class type 10133 /// (e.g., whether it has a vtable). 10134 static const CXXRecordDecl *getContainedDynamicClass(QualType T, 10135 bool &IsContained) { 10136 // Look through array types while ignoring qualifiers. 10137 const Type *Ty = T->getBaseElementTypeUnsafe(); 10138 IsContained = false; 10139 10140 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 10141 RD = RD ? 
RD->getDefinition() : nullptr; 10142 if (!RD || RD->isInvalidDecl()) 10143 return nullptr; 10144 10145 if (RD->isDynamicClass()) 10146 return RD; 10147 10148 // Check all the fields. If any bases were dynamic, the class is dynamic. 10149 // It's impossible for a class to transitively contain itself by value, so 10150 // infinite recursion is impossible. 10151 for (auto *FD : RD->fields()) { 10152 bool SubContained; 10153 if (const CXXRecordDecl *ContainedRD = 10154 getContainedDynamicClass(FD->getType(), SubContained)) { 10155 IsContained = true; 10156 return ContainedRD; 10157 } 10158 } 10159 10160 return nullptr; 10161 } 10162 10163 static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) { 10164 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) 10165 if (Unary->getKind() == UETT_SizeOf) 10166 return Unary; 10167 return nullptr; 10168 } 10169 10170 /// If E is a sizeof expression, returns its argument expression, 10171 /// otherwise returns NULL. 10172 static const Expr *getSizeOfExprArg(const Expr *E) { 10173 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10174 if (!SizeOf->isArgumentType()) 10175 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); 10176 return nullptr; 10177 } 10178 10179 /// If E is a sizeof expression, returns its argument type. 10180 static QualType getSizeOfArgType(const Expr *E) { 10181 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E)) 10182 return SizeOf->getTypeOfArgument(); 10183 return QualType(); 10184 } 10185 10186 namespace { 10187 10188 struct SearchNonTrivialToInitializeField 10189 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> { 10190 using Super = 10191 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>; 10192 10193 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {} 10194 10195 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT, 10196 SourceLocation SL) { 10197 if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10198 asDerived().visitArray(PDIK, AT, SL); 10199 return; 10200 } 10201 10202 Super::visitWithKind(PDIK, FT, SL); 10203 } 10204 10205 void visitARCStrong(QualType FT, SourceLocation SL) { 10206 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10207 } 10208 void visitARCWeak(QualType FT, SourceLocation SL) { 10209 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1); 10210 } 10211 void visitStruct(QualType FT, SourceLocation SL) { 10212 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10213 visit(FD->getType(), FD->getLocation()); 10214 } 10215 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK, 10216 const ArrayType *AT, SourceLocation SL) { 10217 visit(getContext().getBaseElementType(AT), SL); 10218 } 10219 void visitTrivial(QualType FT, SourceLocation SL) {} 10220 10221 static void diag(QualType RT, const Expr *E, Sema &S) { 10222 SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation()); 10223 } 10224 10225 ASTContext &getContext() { return S.getASTContext(); } 10226 10227 const Expr *E; 10228 Sema &S; 10229 }; 10230 10231 struct SearchNonTrivialToCopyField 10232 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> { 10233 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>; 10234 10235 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {} 10236 10237 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT, 10238 SourceLocation SL) { 10239 if 
(const auto *AT = asDerived().getContext().getAsArrayType(FT)) { 10240 asDerived().visitArray(PCK, AT, SL); 10241 return; 10242 } 10243 10244 Super::visitWithKind(PCK, FT, SL); 10245 } 10246 10247 void visitARCStrong(QualType FT, SourceLocation SL) { 10248 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10249 } 10250 void visitARCWeak(QualType FT, SourceLocation SL) { 10251 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0); 10252 } 10253 void visitStruct(QualType FT, SourceLocation SL) { 10254 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields()) 10255 visit(FD->getType(), FD->getLocation()); 10256 } 10257 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT, 10258 SourceLocation SL) { 10259 visit(getContext().getBaseElementType(AT), SL); 10260 } 10261 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT, 10262 SourceLocation SL) {} 10263 void visitTrivial(QualType FT, SourceLocation SL) {} 10264 void visitVolatileTrivial(QualType FT, SourceLocation SL) {} 10265 10266 static void diag(QualType RT, const Expr *E, Sema &S) { 10267 SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation()); 10268 } 10269 10270 ASTContext &getContext() { return S.getASTContext(); } 10271 10272 const Expr *E; 10273 Sema &S; 10274 }; 10275 10276 } 10277 10278 /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. 10279 static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { 10280 SizeofExpr = SizeofExpr->IgnoreParenImpCasts(); 10281 10282 if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) { 10283 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add) 10284 return false; 10285 10286 return doesExprLikelyComputeSize(BO->getLHS()) || 10287 doesExprLikelyComputeSize(BO->getRHS()); 10288 } 10289 10290 return getAsSizeOfExpr(SizeofExpr) != nullptr; 10291 } 10292 10293 /// Check if the ArgLoc originated from a macro passed to the call at CallLoc. 10294 /// 10295 /// \code 10296 /// #define MACRO 0 10297 /// foo(MACRO); 10298 /// foo(0); 10299 /// \endcode 10300 /// 10301 /// This should return true for the first call to foo, but not for the second 10302 /// (regardless of whether foo is a macro or function). 10303 static bool isArgumentExpandedFromMacro(SourceManager &SM, 10304 SourceLocation CallLoc, 10305 SourceLocation ArgLoc) { 10306 if (!CallLoc.isMacroID()) 10307 return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc); 10308 10309 return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) != 10310 SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc)); 10311 } 10312 10313 /// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the 10314 /// last two arguments transposed. 10315 static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) { 10316 if (BId != Builtin::BImemset && BId != Builtin::BIbzero) 10317 return; 10318 10319 const Expr *SizeArg = 10320 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); 10321 10322 auto isLiteralZero = [](const Expr *E) { 10323 return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0; 10324 }; 10325 10326 // If we're memsetting or bzeroing 0 bytes, then this is likely an error. 
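  // For example (illustrative): bzero(buf, 0) zeroes no bytes at all and is
  // almost certainly a mistake, so it is flagged below.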
10327 SourceLocation CallLoc = Call->getRParenLoc(); 10328 SourceManager &SM = S.getSourceManager(); 10329 if (isLiteralZero(SizeArg) && 10330 !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) { 10331 10332 SourceLocation DiagLoc = SizeArg->getExprLoc(); 10333 10334 // Some platforms #define bzero to __builtin_memset. See if this is the 10335 // case, and if so, emit a better diagnostic. 10336 if (BId == Builtin::BIbzero || 10337 (CallLoc.isMacroID() && Lexer::getImmediateMacroName( 10338 CallLoc, SM, S.getLangOpts()) == "bzero")) { 10339 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size); 10340 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence); 10341 } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) { 10342 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0; 10343 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0; 10344 } 10345 return; 10346 } 10347 10348 // If the second argument to a memset is a sizeof expression and the third 10349 // isn't, this is also likely an error. This should catch 10350 // 'memset(buf, sizeof(buf), 0xff)'. 10351 if (BId == Builtin::BImemset && 10352 doesExprLikelyComputeSize(Call->getArg(1)) && 10353 !doesExprLikelyComputeSize(Call->getArg(2))) { 10354 SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); 10355 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; 10356 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1; 10357 return; 10358 } 10359 } 10360 10361 /// Check for dangerous or invalid arguments to memset(). 10362 /// 10363 /// This issues warnings on known problematic, dangerous or unspecified 10364 /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' 10365 /// function calls. 10366 /// 10367 /// \param Call The call expression to diagnose. 10368 void Sema::CheckMemaccessArguments(const CallExpr *Call, 10369 unsigned BId, 10370 IdentifierInfo *FnName) { 10371 assert(BId != 0); 10372 10373 // It is possible to have a non-standard definition of memset. Validate 10374 // we have enough arguments, and if not, abort further checking. 10375 unsigned ExpectedNumArgs = 10376 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3); 10377 if (Call->getNumArgs() < ExpectedNumArgs) 10378 return; 10379 10380 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || 10381 BId == Builtin::BIstrndup ? 1 : 2); 10382 unsigned LenArg = 10383 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2); 10384 const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); 10385 10386 if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, 10387 Call->getBeginLoc(), Call->getRParenLoc())) 10388 return; 10389 10390 // Catch cases like 'memset(buf, sizeof(buf), 0)'. 10391 CheckMemaccessSize(*this, BId, Call); 10392 10393 // We have special checking when the length is a sizeof expression. 10394 QualType SizeOfArgTy = getSizeOfArgType(LenExpr); 10395 const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); 10396 llvm::FoldingSetNodeID SizeOfArgID; 10397 10398 // Although widely used, 'bzero' is not a standard function. Be more strict 10399 // with the argument types before allowing diagnostics and only allow the 10400 // form bzero(ptr, sizeof(...)). 
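  // Illustrative example: bzero(&obj, sizeof(obj)) has a pointer-typed first
  // argument and proceeds to the checks below; for bzero, calls whose first
  // argument is not of pointer type are skipped entirely.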
10401 QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType(); 10402 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>()) 10403 return; 10404 10405 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { 10406 const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); 10407 SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); 10408 10409 QualType DestTy = Dest->getType(); 10410 QualType PointeeTy; 10411 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { 10412 PointeeTy = DestPtrTy->getPointeeType(); 10413 10414 // Never warn about void type pointers. This can be used to suppress 10415 // false positives. 10416 if (PointeeTy->isVoidType()) 10417 continue; 10418 10419 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by 10420 // actually comparing the expressions for equality. Because computing the 10421 // expression IDs can be expensive, we only do this if the diagnostic is 10422 // enabled. 10423 if (SizeOfArg && 10424 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, 10425 SizeOfArg->getExprLoc())) { 10426 // We only compute IDs for expressions if the warning is enabled, and 10427 // cache the sizeof arg's ID. 10428 if (SizeOfArgID == llvm::FoldingSetNodeID()) 10429 SizeOfArg->Profile(SizeOfArgID, Context, true); 10430 llvm::FoldingSetNodeID DestID; 10431 Dest->Profile(DestID, Context, true); 10432 if (DestID == SizeOfArgID) { 10433 // TODO: For strncpy() and friends, this could suggest sizeof(dst) 10434 // over sizeof(src) as well. 10435 unsigned ActionIdx = 0; // Default is to suggest dereferencing. 10436 StringRef ReadableName = FnName->getName(); 10437 10438 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) 10439 if (UnaryOp->getOpcode() == UO_AddrOf) 10440 ActionIdx = 1; // If its an address-of operator, just remove it. 10441 if (!PointeeTy->isIncompleteType() && 10442 (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) 10443 ActionIdx = 2; // If the pointee's size is sizeof(char), 10444 // suggest an explicit length. 10445 10446 // If the function is defined as a builtin macro, do not show macro 10447 // expansion. 10448 SourceLocation SL = SizeOfArg->getExprLoc(); 10449 SourceRange DSR = Dest->getSourceRange(); 10450 SourceRange SSR = SizeOfArg->getSourceRange(); 10451 SourceManager &SM = getSourceManager(); 10452 10453 if (SM.isMacroArgExpansion(SL)) { 10454 ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); 10455 SL = SM.getSpellingLoc(SL); 10456 DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), 10457 SM.getSpellingLoc(DSR.getEnd())); 10458 SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), 10459 SM.getSpellingLoc(SSR.getEnd())); 10460 } 10461 10462 DiagRuntimeBehavior(SL, SizeOfArg, 10463 PDiag(diag::warn_sizeof_pointer_expr_memaccess) 10464 << ReadableName 10465 << PointeeTy 10466 << DestTy 10467 << DSR 10468 << SSR); 10469 DiagRuntimeBehavior(SL, SizeOfArg, 10470 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) 10471 << ActionIdx 10472 << SSR); 10473 10474 break; 10475 } 10476 } 10477 10478 // Also check for cases where the sizeof argument is the exact same 10479 // type as the memory argument, and where it points to a user-defined 10480 // record type. 
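        // For example (illustrative): given 'struct S *p', a call like
        // memset(p, 0, sizeof(struct S *)) measures the pointer rather than
        // the pointee and is diagnosed below.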
10481 if (SizeOfArgTy != QualType()) { 10482 if (PointeeTy->isRecordType() && 10483 Context.typesAreCompatible(SizeOfArgTy, DestTy)) { 10484 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, 10485 PDiag(diag::warn_sizeof_pointer_type_memaccess) 10486 << FnName << SizeOfArgTy << ArgIdx 10487 << PointeeTy << Dest->getSourceRange() 10488 << LenExpr->getSourceRange()); 10489 break; 10490 } 10491 } 10492 } else if (DestTy->isArrayType()) { 10493 PointeeTy = DestTy; 10494 } 10495 10496 if (PointeeTy == QualType()) 10497 continue; 10498 10499 // Always complain about dynamic classes. 10500 bool IsContained; 10501 if (const CXXRecordDecl *ContainedRD = 10502 getContainedDynamicClass(PointeeTy, IsContained)) { 10503 10504 unsigned OperationType = 0; 10505 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp; 10506 // "overwritten" if we're warning about the destination for any call 10507 // but memcmp; otherwise a verb appropriate to the call. 10508 if (ArgIdx != 0 || IsCmp) { 10509 if (BId == Builtin::BImemcpy) 10510 OperationType = 1; 10511 else if(BId == Builtin::BImemmove) 10512 OperationType = 2; 10513 else if (IsCmp) 10514 OperationType = 3; 10515 } 10516 10517 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10518 PDiag(diag::warn_dyn_class_memaccess) 10519 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName 10520 << IsContained << ContainedRD << OperationType 10521 << Call->getCallee()->getSourceRange()); 10522 } else if (PointeeTy.hasNonTrivialObjCLifetime() && 10523 BId != Builtin::BImemset) 10524 DiagRuntimeBehavior( 10525 Dest->getExprLoc(), Dest, 10526 PDiag(diag::warn_arc_object_memaccess) 10527 << ArgIdx << FnName << PointeeTy 10528 << Call->getCallee()->getSourceRange()); 10529 else if (const auto *RT = PointeeTy->getAs<RecordType>()) { 10530 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && 10531 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { 10532 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10533 PDiag(diag::warn_cstruct_memaccess) 10534 << ArgIdx << FnName << PointeeTy << 0); 10535 SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this); 10536 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) && 10537 RT->getDecl()->isNonTrivialToPrimitiveCopy()) { 10538 DiagRuntimeBehavior(Dest->getExprLoc(), Dest, 10539 PDiag(diag::warn_cstruct_memaccess) 10540 << ArgIdx << FnName << PointeeTy << 1); 10541 SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this); 10542 } else { 10543 continue; 10544 } 10545 } else 10546 continue; 10547 10548 DiagRuntimeBehavior( 10549 Dest->getExprLoc(), Dest, 10550 PDiag(diag::note_bad_memaccess_silence) 10551 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); 10552 break; 10553 } 10554 } 10555 10556 // A little helper routine: ignore addition and subtraction of integer literals. 10557 // This intentionally does not ignore all integer constant expressions because 10558 // we don't want to remove sizeof(). 
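// For example (illustrative): given 'sizeof(src) + 1' or '(sizeof(src)) - 2',
// this returns the 'sizeof(src)' subexpression, but 'sizeof(src) - n' is left
// untouched because 'n' is not an integer literal.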
10559 static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { 10560 Ex = Ex->IgnoreParenCasts(); 10561 10562 while (true) { 10563 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); 10564 if (!BO || !BO->isAdditiveOp()) 10565 break; 10566 10567 const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); 10568 const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); 10569 10570 if (isa<IntegerLiteral>(RHS)) 10571 Ex = LHS; 10572 else if (isa<IntegerLiteral>(LHS)) 10573 Ex = RHS; 10574 else 10575 break; 10576 } 10577 10578 return Ex; 10579 } 10580 10581 static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, 10582 ASTContext &Context) { 10583 // Only handle constant-sized or VLAs, but not flexible members. 10584 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { 10585 // Only issue the FIXIT for arrays of size > 1. 10586 if (CAT->getSize().getSExtValue() <= 1) 10587 return false; 10588 } else if (!Ty->isVariableArrayType()) { 10589 return false; 10590 } 10591 return true; 10592 } 10593 10594 // Warn if the user has made the 'size' argument to strlcpy or strlcat 10595 // be the size of the source, instead of the destination. 10596 void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 10597 IdentifierInfo *FnName) { 10598 10599 // Don't crash if the user has the wrong number of arguments 10600 unsigned NumArgs = Call->getNumArgs(); 10601 if ((NumArgs != 3) && (NumArgs != 4)) 10602 return; 10603 10604 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 10605 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 10606 const Expr *CompareWithSrc = nullptr; 10607 10608 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 10609 Call->getBeginLoc(), Call->getRParenLoc())) 10610 return; 10611 10612 // Look for 'strlcpy(dst, x, sizeof(x))' 10613 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 10614 CompareWithSrc = Ex; 10615 else { 10616 // Look for 'strlcpy(dst, x, strlen(x))' 10617 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 10618 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 10619 SizeCall->getNumArgs() == 1) 10620 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 10621 } 10622 } 10623 10624 if (!CompareWithSrc) 10625 return; 10626 10627 // Determine if the argument to sizeof/strlen is equal to the source 10628 // argument. In principle there's all kinds of things you could do 10629 // here, for instance creating an == expression and evaluating it with 10630 // EvaluateAsBooleanCondition, but this uses a more direct technique: 10631 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 10632 if (!SrcArgDRE) 10633 return; 10634 10635 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 10636 if (!CompareWithSrcDRE || 10637 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 10638 return; 10639 10640 const Expr *OriginalSizeArg = Call->getArg(2); 10641 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 10642 << OriginalSizeArg->getSourceRange() << FnName; 10643 10644 // Output a FIXIT hint if the destination is an array (rather than a 10645 // pointer to an array). This could be enhanced to handle some 10646 // pointers if we know the actual size, like if DstArg is 'array+2' 10647 // we could say 'sizeof(array)-2'. 
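  // Illustrative end-to-end example (hypothetical user code):
  //
  //   char dst[64], src[128];
  //   strlcpy(dst, src, sizeof(src));  // warns: size taken from the *source*
  //
  // and, because 'dst' is a constant-size array, the note below suggests
  // replacing the size argument with 'sizeof(dst)'.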
10648   const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
10649   if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context))
10650     return;
10651
10652   SmallString<128> sizeString;
10653   llvm::raw_svector_ostream OS(sizeString);
10654   OS << "sizeof(";
10655   DstArg->printPretty(OS, nullptr, getPrintingPolicy());
10656   OS << ")";
10657
10658   Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
10659       << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
10660                                       OS.str());
10661 }
10662
10663 /// Check if two expressions refer to the same declaration.
10664 static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
10665   if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
10666     if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
10667       return D1->getDecl() == D2->getDecl();
10668   return false;
10669 }
10670
10671 static const Expr *getStrlenExprArg(const Expr *E) {
10672   if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
10673     const FunctionDecl *FD = CE->getDirectCallee();
10674     if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
10675       return nullptr;
10676     return CE->getArg(0)->IgnoreParenCasts();
10677   }
10678   return nullptr;
10679 }
10680
10681 // Warn on anti-patterns used as the 'size' argument to strncat.
10682 // The correct size argument should look like the following:
10683 //   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
10684 void Sema::CheckStrncatArguments(const CallExpr *CE,
10685                                  IdentifierInfo *FnName) {
10686   // Don't crash if the user has the wrong number of arguments.
10687   if (CE->getNumArgs() < 3)
10688     return;
10689   const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
10690   const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
10691   const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
10692
10693   if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
10694                                      CE->getRParenLoc()))
10695     return;
10696
10697   // Identify common expressions, which are wrongly used as the size argument
10698   // to strncat and may lead to buffer overflows.
10699   unsigned PatternType = 0;
10700   if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
10701     // - sizeof(dst)
10702     if (referToTheSameDecl(SizeOfArg, DstArg))
10703       PatternType = 1;
10704     // - sizeof(src)
10705     else if (referToTheSameDecl(SizeOfArg, SrcArg))
10706       PatternType = 2;
10707   } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
10708     if (BE->getOpcode() == BO_Sub) {
10709       const Expr *L = BE->getLHS()->IgnoreParenCasts();
10710       const Expr *R = BE->getRHS()->IgnoreParenCasts();
10711       // - sizeof(dst) - strlen(dst)
10712       if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
10713           referToTheSameDecl(DstArg, getStrlenExprArg(R)))
10714         PatternType = 1;
10715       // - sizeof(src) - (anything)
10716       else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
10717         PatternType = 2;
10718     }
10719   }
10720
10721   if (PatternType == 0)
10722     return;
10723
10724   // Generate the diagnostic.
10725   SourceLocation SL = LenArg->getBeginLoc();
10726   SourceRange SR = LenArg->getSourceRange();
10727   SourceManager &SM = getSourceManager();
10728
10729   // If the function is defined as a builtin macro, do not show macro expansion.
10730   if (SM.isMacroArgExpansion(SL)) {
10731     SL = SM.getSpellingLoc(SL);
10732     SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
10733                      SM.getSpellingLoc(SR.getEnd()));
10734   }
10735
10736   // Check if the destination is an array (rather than a pointer to an array).
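  // Illustrative summary (hypothetical user code, assuming 'char dst[64]'):
  //
  //   strncat(dst, src, sizeof(dst));  // pattern 1: size argument too large
  //   strncat(dst, src, sizeof(src));  // pattern 2: size taken from the source
  //
  // When the destination is a constant-size array, the note below suggests the
  // conventional 'sizeof(dst) - strlen(dst) - 1' form instead.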
10737 QualType DstTy = DstArg->getType(); 10738 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, 10739 Context); 10740 if (!isKnownSizeArray) { 10741 if (PatternType == 1) 10742 Diag(SL, diag::warn_strncat_wrong_size) << SR; 10743 else 10744 Diag(SL, diag::warn_strncat_src_size) << SR; 10745 return; 10746 } 10747 10748 if (PatternType == 1) 10749 Diag(SL, diag::warn_strncat_large_size) << SR; 10750 else 10751 Diag(SL, diag::warn_strncat_src_size) << SR; 10752 10753 SmallString<128> sizeString; 10754 llvm::raw_svector_ostream OS(sizeString); 10755 OS << "sizeof("; 10756 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10757 OS << ") - "; 10758 OS << "strlen("; 10759 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 10760 OS << ") - 1"; 10761 10762 Diag(SL, diag::note_strncat_wrong_size) 10763 << FixItHint::CreateReplacement(SR, OS.str()); 10764 } 10765 10766 namespace { 10767 void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName, 10768 const UnaryOperator *UnaryExpr, const Decl *D) { 10769 if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) { 10770 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object) 10771 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D); 10772 return; 10773 } 10774 } 10775 10776 void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName, 10777 const UnaryOperator *UnaryExpr) { 10778 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) { 10779 const Decl *D = Lvalue->getDecl(); 10780 if (isa<DeclaratorDecl>(D)) 10781 if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType()) 10782 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D); 10783 } 10784 10785 if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr())) 10786 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, 10787 Lvalue->getMemberDecl()); 10788 } 10789 10790 void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName, 10791 const UnaryOperator *UnaryExpr) { 10792 const auto *Lambda = dyn_cast<LambdaExpr>( 10793 UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens()); 10794 if (!Lambda) 10795 return; 10796 10797 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object) 10798 << CalleeName << 2 /*object: lambda expression*/; 10799 } 10800 10801 void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName, 10802 const DeclRefExpr *Lvalue) { 10803 const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()); 10804 if (Var == nullptr) 10805 return; 10806 10807 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object) 10808 << CalleeName << 0 /*object: */ << Var; 10809 } 10810 10811 void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName, 10812 const CastExpr *Cast) { 10813 SmallString<128> SizeString; 10814 llvm::raw_svector_ostream OS(SizeString); 10815 10816 clang::CastKind Kind = Cast->getCastKind(); 10817 if (Kind == clang::CK_BitCast && 10818 !Cast->getSubExpr()->getType()->isFunctionPointerType()) 10819 return; 10820 if (Kind == clang::CK_IntegralToPointer && 10821 !isa<IntegerLiteral>( 10822 Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens())) 10823 return; 10824 10825 switch (Cast->getCastKind()) { 10826 case clang::CK_BitCast: 10827 case clang::CK_IntegralToPointer: 10828 case clang::CK_FunctionToPointerDecay: 10829 OS << '\''; 10830 Cast->printPretty(OS, nullptr, S.getPrintingPolicy()); 10831 OS << '\''; 10832 break; 10833 default: 10834 return; 10835 } 10836 10837 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object) 
10838 << CalleeName << 0 /*object: */ << OS.str(); 10839 } 10840 } // namespace 10841 10842 /// Alerts the user that they are attempting to free a non-malloc'd object. 10843 void Sema::CheckFreeArguments(const CallExpr *E) { 10844 const std::string CalleeName = 10845 dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString(); 10846 10847 { // Prefer something that doesn't involve a cast to make things simpler. 10848 const Expr *Arg = E->getArg(0)->IgnoreParenCasts(); 10849 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg)) 10850 switch (UnaryExpr->getOpcode()) { 10851 case UnaryOperator::Opcode::UO_AddrOf: 10852 return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr); 10853 case UnaryOperator::Opcode::UO_Plus: 10854 return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr); 10855 default: 10856 break; 10857 } 10858 10859 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg)) 10860 if (Lvalue->getType()->isArrayType()) 10861 return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue); 10862 10863 if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) { 10864 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object) 10865 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier(); 10866 return; 10867 } 10868 10869 if (isa<BlockExpr>(Arg)) { 10870 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object) 10871 << CalleeName << 1 /*object: block*/; 10872 return; 10873 } 10874 } 10875 // Maybe the cast was important, check after the other cases. 10876 if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0))) 10877 return CheckFreeArgumentsCast(*this, CalleeName, Cast); 10878 } 10879 10880 void 10881 Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, 10882 SourceLocation ReturnLoc, 10883 bool isObjCMethod, 10884 const AttrVec *Attrs, 10885 const FunctionDecl *FD) { 10886 // Check if the return value is null but should not be. 10887 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) || 10888 (!isObjCMethod && isNonNullType(Context, lhsType))) && 10889 CheckNonNullExpr(*this, RetValExp)) 10890 Diag(ReturnLoc, diag::warn_null_ret) 10891 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); 10892 10893 // C++11 [basic.stc.dynamic.allocation]p4: 10894 // If an allocation function declared with a non-throwing 10895 // exception-specification fails to allocate storage, it shall return 10896 // a null pointer. Any other allocation function that fails to allocate 10897 // storage shall indicate failure only by throwing an exception [...] 10898 if (FD) { 10899 OverloadedOperatorKind Op = FD->getOverloadedOperator(); 10900 if (Op == OO_New || Op == OO_Array_New) { 10901 const FunctionProtoType *Proto 10902 = FD->getType()->castAs<FunctionProtoType>(); 10903 if (!Proto->isNothrow(/*ResultIfDependent*/true) && 10904 CheckNonNullExpr(*this, RetValExp)) 10905 Diag(ReturnLoc, diag::warn_operator_new_returns_null) 10906 << FD << getLangOpts().CPlusPlus11; 10907 } 10908 } 10909 10910 // PPC MMA non-pointer types are not allowed as return type. Checking the type 10911 // here prevent the user from using a PPC MMA type as trailing return type. 10912 if (Context.getTargetInfo().getTriple().isPPC64()) 10913 CheckPPCMMAType(RetValExp->getType(), ReturnLoc); 10914 } 10915 10916 //===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===// 10917 10918 /// Check for comparisons of floating point operands using != and ==. 
10919 /// Issue a warning if these are not self-comparisons, as they are not likely
10920 /// to do what the programmer intended.
10921 void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
10922   Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
10923   Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
10924
10925   // Special case: check for x == x (which is OK).
10926   // Do not emit warnings for such cases.
10927   if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
10928     if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
10929       if (DRL->getDecl() == DRR->getDecl())
10930         return;
10931
10932   // Special case: check for comparisons against literals that can be exactly
10933   // represented by APFloat. In such cases, do not emit a warning. This
10934   // is a heuristic: comparisons against such literals are often used to
10935   // detect if a value in a variable has not changed. This clearly can
10936   // lead to false negatives.
10937   if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
10938     if (FLL->isExact())
10939       return;
10940   } else
10941     if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
10942       if (FLR->isExact())
10943         return;
10944
10945   // Suppress the warning when either operand is a call to a builtin function.
10946   if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
10947     if (CL->getBuiltinCallee())
10948       return;
10949
10950   if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
10951     if (CR->getBuiltinCallee())
10952       return;
10953
10954   // Emit the diagnostic.
10955   Diag(Loc, diag::warn_floatingpoint_eq)
10956     << LHS->getSourceRange() << RHS->getSourceRange();
10957 }
10958
10959 //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
10960 //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
10961
10962 namespace {
10963
10964 /// Structure recording the 'active' range of an integer-valued
10965 /// expression.
10966 struct IntRange {
10967   /// The number of bits active in the int. Note that this includes exactly one
10968   /// sign bit if !NonNegative.
10969   unsigned Width;
10970
10971   /// True if the int is known not to have negative values. If so, all leading
10972   /// bits before Width are known zero, otherwise they are known to be the
10973   /// same as the MSB within Width.
10974   bool NonNegative;
10975
10976   IntRange(unsigned Width, bool NonNegative)
10977       : Width(Width), NonNegative(NonNegative) {}
10978
10979   /// Number of bits excluding the sign bit.
10980   unsigned valueBits() const {
10981     return NonNegative ? Width : Width - 1;
10982   }
10983
10984   /// Returns the range of the bool type.
10985   static IntRange forBoolType() {
10986     return IntRange(1, true);
10987   }
10988
10989   /// Returns the range of an opaque value of the given integral type.
10990   static IntRange forValueOfType(ASTContext &C, QualType T) {
10991     return forValueOfCanonicalType(C,
10992                           T->getCanonicalTypeInternal().getTypePtr());
10993   }
10994
10995   /// Returns the range of an opaque value of a canonical integral type.
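  /// For example (illustrative): in C++, 'enum E { A, B, C }' has two positive
  /// bits and no negative bits, so an opaque value of type E is treated as
  /// IntRange(2, /*NonNegative=*/true), whereas a fixed-underlying-type enum
  /// such as 'enum F : int' gets the full IntRange(32, false) on typical
  /// targets.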
10996 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { 10997 assert(T->isCanonicalUnqualified()); 10998 10999 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11000 T = VT->getElementType().getTypePtr(); 11001 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11002 T = CT->getElementType().getTypePtr(); 11003 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11004 T = AT->getValueType().getTypePtr(); 11005 11006 if (!C.getLangOpts().CPlusPlus) { 11007 // For enum types in C code, use the underlying datatype. 11008 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11009 T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr(); 11010 } else if (const EnumType *ET = dyn_cast<EnumType>(T)) { 11011 // For enum types in C++, use the known bit width of the enumerators. 11012 EnumDecl *Enum = ET->getDecl(); 11013 // In C++11, enums can have a fixed underlying type. Use this type to 11014 // compute the range. 11015 if (Enum->isFixed()) { 11016 return IntRange(C.getIntWidth(QualType(T, 0)), 11017 !ET->isSignedIntegerOrEnumerationType()); 11018 } 11019 11020 unsigned NumPositive = Enum->getNumPositiveBits(); 11021 unsigned NumNegative = Enum->getNumNegativeBits(); 11022 11023 if (NumNegative == 0) 11024 return IntRange(NumPositive, true/*NonNegative*/); 11025 else 11026 return IntRange(std::max(NumPositive + 1, NumNegative), 11027 false/*NonNegative*/); 11028 } 11029 11030 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 11031 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11032 11033 const BuiltinType *BT = cast<BuiltinType>(T); 11034 assert(BT->isInteger()); 11035 11036 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11037 } 11038 11039 /// Returns the "target" range of a canonical integral type, i.e. 11040 /// the range of values expressible in the type. 11041 /// 11042 /// This matches forValueOfCanonicalType except that enums have the 11043 /// full range of their type, not the range of their enumerators. 11044 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) { 11045 assert(T->isCanonicalUnqualified()); 11046 11047 if (const VectorType *VT = dyn_cast<VectorType>(T)) 11048 T = VT->getElementType().getTypePtr(); 11049 if (const ComplexType *CT = dyn_cast<ComplexType>(T)) 11050 T = CT->getElementType().getTypePtr(); 11051 if (const AtomicType *AT = dyn_cast<AtomicType>(T)) 11052 T = AT->getValueType().getTypePtr(); 11053 if (const EnumType *ET = dyn_cast<EnumType>(T)) 11054 T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr(); 11055 11056 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 11057 return IntRange(EIT->getNumBits(), EIT->isUnsigned()); 11058 11059 const BuiltinType *BT = cast<BuiltinType>(T); 11060 assert(BT->isInteger()); 11061 11062 return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); 11063 } 11064 11065 /// Returns the supremum of two ranges: i.e. their conservative merge. 11066 static IntRange join(IntRange L, IntRange R) { 11067 bool Unsigned = L.NonNegative && R.NonNegative; 11068 return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned, 11069 L.NonNegative && R.NonNegative); 11070 } 11071 11072 /// Return the range of a bitwise-AND of the two ranges. 
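  /// Illustrative example: AND-ing a 32-bit possibly-negative range with an
  /// 8-bit non-negative range yields an 8-bit non-negative range, since a
  /// non-negative operand bounds the result from above.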
11073 static IntRange bit_and(IntRange L, IntRange R) { 11074 unsigned Bits = std::max(L.Width, R.Width); 11075 bool NonNegative = false; 11076 if (L.NonNegative) { 11077 Bits = std::min(Bits, L.Width); 11078 NonNegative = true; 11079 } 11080 if (R.NonNegative) { 11081 Bits = std::min(Bits, R.Width); 11082 NonNegative = true; 11083 } 11084 return IntRange(Bits, NonNegative); 11085 } 11086 11087 /// Return the range of a sum of the two ranges. 11088 static IntRange sum(IntRange L, IntRange R) { 11089 bool Unsigned = L.NonNegative && R.NonNegative; 11090 return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned, 11091 Unsigned); 11092 } 11093 11094 /// Return the range of a difference of the two ranges. 11095 static IntRange difference(IntRange L, IntRange R) { 11096 // We need a 1-bit-wider range if: 11097 // 1) LHS can be negative: least value can be reduced. 11098 // 2) RHS can be negative: greatest value can be increased. 11099 bool CanWiden = !L.NonNegative || !R.NonNegative; 11100 bool Unsigned = L.NonNegative && R.Width == 0; 11101 return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden + 11102 !Unsigned, 11103 Unsigned); 11104 } 11105 11106 /// Return the range of a product of the two ranges. 11107 static IntRange product(IntRange L, IntRange R) { 11108 // If both LHS and RHS can be negative, we can form 11109 // -2^L * -2^R = 2^(L + R) 11110 // which requires L + R + 1 value bits to represent. 11111 bool CanWiden = !L.NonNegative && !R.NonNegative; 11112 bool Unsigned = L.NonNegative && R.NonNegative; 11113 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned, 11114 Unsigned); 11115 } 11116 11117 /// Return the range of a remainder operation between the two ranges. 11118 static IntRange rem(IntRange L, IntRange R) { 11119 // The result of a remainder can't be larger than the result of 11120 // either side. The sign of the result is the sign of the LHS. 11121 bool Unsigned = L.NonNegative; 11122 return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned, 11123 Unsigned); 11124 } 11125 }; 11126 11127 } // namespace 11128 11129 static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value, 11130 unsigned MaxWidth) { 11131 if (value.isSigned() && value.isNegative()) 11132 return IntRange(value.getMinSignedBits(), false); 11133 11134 if (value.getBitWidth() > MaxWidth) 11135 value = value.trunc(MaxWidth); 11136 11137 // isNonNegative() just checks the sign bit without considering 11138 // signedness. 11139 return IntRange(value.getActiveBits(), true); 11140 } 11141 11142 static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty, 11143 unsigned MaxWidth) { 11144 if (result.isInt()) 11145 return GetValueRange(C, result.getInt(), MaxWidth); 11146 11147 if (result.isVector()) { 11148 IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth); 11149 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) { 11150 IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth); 11151 R = IntRange::join(R, El); 11152 } 11153 return R; 11154 } 11155 11156 if (result.isComplexInt()) { 11157 IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth); 11158 IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth); 11159 return IntRange::join(R, I); 11160 } 11161 11162 // This can happen with lossless casts to intptr_t of "based" lvalues. 11163 // Assume it might use arbitrary bits. 
11164 // FIXME: The only reason we need to pass the type in here is to get 11165 // the sign right on this one case. It would be nice if APValue 11166 // preserved this. 11167 assert(result.isLValue() || result.isAddrLabelDiff()); 11168 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType()); 11169 } 11170 11171 static QualType GetExprType(const Expr *E) { 11172 QualType Ty = E->getType(); 11173 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>()) 11174 Ty = AtomicRHS->getValueType(); 11175 return Ty; 11176 } 11177 11178 /// Pseudo-evaluate the given integer expression, estimating the 11179 /// range of values it might take. 11180 /// 11181 /// \param MaxWidth The width to which the value will be truncated. 11182 /// \param Approximate If \c true, return a likely range for the result: in 11183 /// particular, assume that aritmetic on narrower types doesn't leave 11184 /// those types. If \c false, return a range including all possible 11185 /// result values. 11186 static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth, 11187 bool InConstantContext, bool Approximate) { 11188 E = E->IgnoreParens(); 11189 11190 // Try a full evaluation first. 11191 Expr::EvalResult result; 11192 if (E->EvaluateAsRValue(result, C, InConstantContext)) 11193 return GetValueRange(C, result.Val, GetExprType(E), MaxWidth); 11194 11195 // I think we only want to look through implicit casts here; if the 11196 // user has an explicit widening cast, we should treat the value as 11197 // being of the new, wider type. 11198 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 11199 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 11200 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 11201 Approximate); 11202 11203 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 11204 11205 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 11206 CE->getCastKind() == CK_BooleanToSignedIntegral; 11207 11208 // Assume that non-integer casts can span the full range of the type. 11209 if (!isIntegerCast) 11210 return OutputTypeRange; 11211 11212 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 11213 std::min(MaxWidth, OutputTypeRange.Width), 11214 InConstantContext, Approximate); 11215 11216 // Bail out if the subexpr's range is as wide as the cast type. 11217 if (SubRange.Width >= OutputTypeRange.Width) 11218 return OutputTypeRange; 11219 11220 // Otherwise, we take the smaller width, and we're non-negative if 11221 // either the output type or the subexpr is. 11222 return IntRange(SubRange.Width, 11223 SubRange.NonNegative || OutputTypeRange.NonNegative); 11224 } 11225 11226 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 11227 // If we can fold the condition, just take that operand. 11228 bool CondResult; 11229 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 11230 return GetExprRange(C, 11231 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 11232 MaxWidth, InConstantContext, Approximate); 11233 11234 // Otherwise, conservatively merge. 11235 // GetExprRange requires an integer expression, but a throw expression 11236 // results in a void type. 11237 Expr *E = CO->getTrueExpr(); 11238 IntRange L = E->getType()->isVoidType() 11239 ? IntRange{0, true} 11240 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11241 E = CO->getFalseExpr(); 11242 IntRange R = E->getType()->isVoidType() 11243 ? 
IntRange{0, true} 11244 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 11245 return IntRange::join(L, R); 11246 } 11247 11248 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 11249 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 11250 11251 switch (BO->getOpcode()) { 11252 case BO_Cmp: 11253 llvm_unreachable("builtin <=> should have class type"); 11254 11255 // Boolean-valued operations are single-bit and positive. 11256 case BO_LAnd: 11257 case BO_LOr: 11258 case BO_LT: 11259 case BO_GT: 11260 case BO_LE: 11261 case BO_GE: 11262 case BO_EQ: 11263 case BO_NE: 11264 return IntRange::forBoolType(); 11265 11266 // The type of the assignments is the type of the LHS, so the RHS 11267 // is not necessarily the same type. 11268 case BO_MulAssign: 11269 case BO_DivAssign: 11270 case BO_RemAssign: 11271 case BO_AddAssign: 11272 case BO_SubAssign: 11273 case BO_XorAssign: 11274 case BO_OrAssign: 11275 // TODO: bitfields? 11276 return IntRange::forValueOfType(C, GetExprType(E)); 11277 11278 // Simple assignments just pass through the RHS, which will have 11279 // been coerced to the LHS type. 11280 case BO_Assign: 11281 // TODO: bitfields? 11282 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11283 Approximate); 11284 11285 // Operations with opaque sources are black-listed. 11286 case BO_PtrMemD: 11287 case BO_PtrMemI: 11288 return IntRange::forValueOfType(C, GetExprType(E)); 11289 11290 // Bitwise-and uses the *infinum* of the two source ranges. 11291 case BO_And: 11292 case BO_AndAssign: 11293 Combine = IntRange::bit_and; 11294 break; 11295 11296 // Left shift gets black-listed based on a judgement call. 11297 case BO_Shl: 11298 // ...except that we want to treat '1 << (blah)' as logically 11299 // positive. It's an important idiom. 11300 if (IntegerLiteral *I 11301 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 11302 if (I->getValue() == 1) { 11303 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 11304 return IntRange(R.Width, /*NonNegative*/ true); 11305 } 11306 } 11307 LLVM_FALLTHROUGH; 11308 11309 case BO_ShlAssign: 11310 return IntRange::forValueOfType(C, GetExprType(E)); 11311 11312 // Right shift by a constant can narrow its left argument. 11313 case BO_Shr: 11314 case BO_ShrAssign: { 11315 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 11316 Approximate); 11317 11318 // If the shift amount is a positive constant, drop the width by 11319 // that much. 11320 if (Optional<llvm::APSInt> shift = 11321 BO->getRHS()->getIntegerConstantExpr(C)) { 11322 if (shift->isNonNegative()) { 11323 unsigned zext = shift->getZExtValue(); 11324 if (zext >= L.Width) 11325 L.Width = (L.NonNegative ? 0 : 1); 11326 else 11327 L.Width -= zext; 11328 } 11329 } 11330 11331 return L; 11332 } 11333 11334 // Comma acts as its right operand. 11335 case BO_Comma: 11336 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 11337 Approximate); 11338 11339 case BO_Add: 11340 if (!Approximate) 11341 Combine = IntRange::sum; 11342 break; 11343 11344 case BO_Sub: 11345 if (BO->getLHS()->getType()->isPointerType()) 11346 return IntRange::forValueOfType(C, GetExprType(E)); 11347 if (!Approximate) 11348 Combine = IntRange::difference; 11349 break; 11350 11351 case BO_Mul: 11352 if (!Approximate) 11353 Combine = IntRange::product; 11354 break; 11355 11356 // The width of a division result is mostly determined by the size 11357 // of the LHS. 11358 case BO_Div: { 11359 // Don't 'pre-truncate' the operands. 
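      // Worked example (illustrative): a 32-bit operand divided by the constant
      // 16 has floor(log2(16)) == 4, so the result range below narrows to
      // roughly 28 bits.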
11360 unsigned opWidth = C.getIntWidth(GetExprType(E)); 11361 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 11362 Approximate); 11363 11364 // If the divisor is constant, use that. 11365 if (Optional<llvm::APSInt> divisor = 11366 BO->getRHS()->getIntegerConstantExpr(C)) { 11367 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 11368 if (log2 >= L.Width) 11369 L.Width = (L.NonNegative ? 0 : 1); 11370 else 11371 L.Width = std::min(L.Width - log2, MaxWidth); 11372 return L; 11373 } 11374 11375 // Otherwise, just use the LHS's width. 11376 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 11377 // could be -1. 11378 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 11379 Approximate); 11380 return IntRange(L.Width, L.NonNegative && R.NonNegative); 11381 } 11382 11383 case BO_Rem: 11384 Combine = IntRange::rem; 11385 break; 11386 11387 // The default behavior is okay for these. 11388 case BO_Xor: 11389 case BO_Or: 11390 break; 11391 } 11392 11393 // Combine the two ranges, but limit the result to the type in which we 11394 // performed the computation. 11395 QualType T = GetExprType(E); 11396 unsigned opWidth = C.getIntWidth(T); 11397 IntRange L = 11398 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 11399 IntRange R = 11400 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 11401 IntRange C = Combine(L, R); 11402 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 11403 C.Width = std::min(C.Width, MaxWidth); 11404 return C; 11405 } 11406 11407 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 11408 switch (UO->getOpcode()) { 11409 // Boolean-valued operations are white-listed. 11410 case UO_LNot: 11411 return IntRange::forBoolType(); 11412 11413 // Operations with opaque sources are black-listed. 11414 case UO_Deref: 11415 case UO_AddrOf: // should be impossible 11416 return IntRange::forValueOfType(C, GetExprType(E)); 11417 11418 default: 11419 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 11420 Approximate); 11421 } 11422 } 11423 11424 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 11425 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 11426 Approximate); 11427 11428 if (const auto *BitField = E->getSourceBitField()) 11429 return IntRange(BitField->getBitWidthValue(C), 11430 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 11431 11432 return IntRange::forValueOfType(C, GetExprType(E)); 11433 } 11434 11435 static IntRange GetExprRange(ASTContext &C, const Expr *E, 11436 bool InConstantContext, bool Approximate) { 11437 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 11438 Approximate); 11439 } 11440 11441 /// Checks whether the given value, which currently has the given 11442 /// source semantics, has the same value when coerced through the 11443 /// target semantics. 11444 static bool IsSameFloatAfterCast(const llvm::APFloat &value, 11445 const llvm::fltSemantics &Src, 11446 const llvm::fltSemantics &Tgt) { 11447 llvm::APFloat truncated = value; 11448 11449 bool ignored; 11450 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 11451 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 11452 11453 return truncated.bitwiseIsEqual(value); 11454 } 11455 11456 /// Checks whether the given value, which currently has the given 11457 /// source semantics, has the same value when coerced through the 11458 /// target semantics. 
11459 /// 11460 /// The value might be a vector of floats (or a complex number). 11461 static bool IsSameFloatAfterCast(const APValue &value, 11462 const llvm::fltSemantics &Src, 11463 const llvm::fltSemantics &Tgt) { 11464 if (value.isFloat()) 11465 return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); 11466 11467 if (value.isVector()) { 11468 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) 11469 if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) 11470 return false; 11471 return true; 11472 } 11473 11474 assert(value.isComplexFloat()); 11475 return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && 11476 IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); 11477 } 11478 11479 static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC, 11480 bool IsListInit = false); 11481 11482 static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { 11483 // Suppress cases where we are comparing against an enum constant. 11484 if (const DeclRefExpr *DR = 11485 dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) 11486 if (isa<EnumConstantDecl>(DR->getDecl())) 11487 return true; 11488 11489 // Suppress cases where the value is expanded from a macro, unless that macro 11490 // is how a language represents a boolean literal. This is the case in both C 11491 // and Objective-C. 11492 SourceLocation BeginLoc = E->getBeginLoc(); 11493 if (BeginLoc.isMacroID()) { 11494 StringRef MacroName = Lexer::getImmediateMacroName( 11495 BeginLoc, S.getSourceManager(), S.getLangOpts()); 11496 return MacroName != "YES" && MacroName != "NO" && 11497 MacroName != "true" && MacroName != "false"; 11498 } 11499 11500 return false; 11501 } 11502 11503 static bool isKnownToHaveUnsignedValue(Expr *E) { 11504 return E->getType()->isIntegerType() && 11505 (!E->getType()->isSignedIntegerType() || 11506 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType()); 11507 } 11508 11509 namespace { 11510 /// The promoted range of values of a type. In general this has the 11511 /// following structure: 11512 /// 11513 /// |-----------| . . . |-----------| 11514 /// ^ ^ ^ ^ 11515 /// Min HoleMin HoleMax Max 11516 /// 11517 /// ... where there is only a hole if a signed type is promoted to unsigned 11518 /// (in which case Min and Max are the smallest and largest representable 11519 /// values). 11520 struct PromotedRange { 11521 // Min, or HoleMax if there is a hole. 11522 llvm::APSInt PromotedMin; 11523 // Max, or HoleMin if there is a hole. 11524 llvm::APSInt PromotedMax; 11525 11526 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) { 11527 if (R.Width == 0) 11528 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned); 11529 else if (R.Width >= BitWidth && !Unsigned) { 11530 // Promotion made the type *narrower*. This happens when promoting 11531 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'. 11532 // Treat all values of 'signed int' as being in range for now. 11533 PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned); 11534 PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned); 11535 } else { 11536 PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative) 11537 .extOrTrunc(BitWidth); 11538 PromotedMin.setIsUnsigned(Unsigned); 11539 11540 PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative) 11541 .extOrTrunc(BitWidth); 11542 PromotedMax.setIsUnsigned(Unsigned); 11543 } 11544 } 11545 11546 // Determine whether this range is contiguous (has no hole). 
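  // Illustrative example: an 8-bit signed range promoted to 32-bit unsigned
  // covers [0, 127] and [0xFFFFFF80, 0xFFFFFFFF]; PromotedMin then holds
  // 0xFFFFFF80 (HoleMax) and PromotedMax holds 127 (HoleMin), so the range is
  // not contiguous.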
11547 bool isContiguous() const { return PromotedMin <= PromotedMax; } 11548 11549 // Where a constant value is within the range. 11550 enum ComparisonResult { 11551 LT = 0x1, 11552 LE = 0x2, 11553 GT = 0x4, 11554 GE = 0x8, 11555 EQ = 0x10, 11556 NE = 0x20, 11557 InRangeFlag = 0x40, 11558 11559 Less = LE | LT | NE, 11560 Min = LE | InRangeFlag, 11561 InRange = InRangeFlag, 11562 Max = GE | InRangeFlag, 11563 Greater = GE | GT | NE, 11564 11565 OnlyValue = LE | GE | EQ | InRangeFlag, 11566 InHole = NE 11567 }; 11568 11569 ComparisonResult compare(const llvm::APSInt &Value) const { 11570 assert(Value.getBitWidth() == PromotedMin.getBitWidth() && 11571 Value.isUnsigned() == PromotedMin.isUnsigned()); 11572 if (!isContiguous()) { 11573 assert(Value.isUnsigned() && "discontiguous range for signed compare"); 11574 if (Value.isMinValue()) return Min; 11575 if (Value.isMaxValue()) return Max; 11576 if (Value >= PromotedMin) return InRange; 11577 if (Value <= PromotedMax) return InRange; 11578 return InHole; 11579 } 11580 11581 switch (llvm::APSInt::compareValues(Value, PromotedMin)) { 11582 case -1: return Less; 11583 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min; 11584 case 1: 11585 switch (llvm::APSInt::compareValues(Value, PromotedMax)) { 11586 case -1: return InRange; 11587 case 0: return Max; 11588 case 1: return Greater; 11589 } 11590 } 11591 11592 llvm_unreachable("impossible compare result"); 11593 } 11594 11595 static llvm::Optional<StringRef> 11596 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { 11597 if (Op == BO_Cmp) { 11598 ComparisonResult LTFlag = LT, GTFlag = GT; 11599 if (ConstantOnRHS) std::swap(LTFlag, GTFlag); 11600 11601 if (R & EQ) return StringRef("'std::strong_ordering::equal'"); 11602 if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); 11603 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); 11604 return llvm::None; 11605 } 11606 11607 ComparisonResult TrueFlag, FalseFlag; 11608 if (Op == BO_EQ) { 11609 TrueFlag = EQ; 11610 FalseFlag = NE; 11611 } else if (Op == BO_NE) { 11612 TrueFlag = NE; 11613 FalseFlag = EQ; 11614 } else { 11615 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) { 11616 TrueFlag = LT; 11617 FalseFlag = GE; 11618 } else { 11619 TrueFlag = GT; 11620 FalseFlag = LE; 11621 } 11622 if (Op == BO_GE || Op == BO_LE) 11623 std::swap(TrueFlag, FalseFlag); 11624 } 11625 if (R & TrueFlag) 11626 return StringRef("true"); 11627 if (R & FalseFlag) 11628 return StringRef("false"); 11629 return llvm::None; 11630 } 11631 }; 11632 } 11633 11634 static bool HasEnumType(Expr *E) { 11635 // Strip off implicit integral promotions. 11636 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { 11637 if (ICE->getCastKind() != CK_IntegralCast && 11638 ICE->getCastKind() != CK_NoOp) 11639 break; 11640 E = ICE->getSubExpr(); 11641 } 11642 11643 return E->getType()->isEnumeralType(); 11644 } 11645 11646 static int classifyConstantValue(Expr *Constant) { 11647 // The values of this enumeration are used in the diagnostics 11648 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. 11649 enum ConstantValueKind { 11650 Miscellaneous = 0, 11651 LiteralTrue, 11652 LiteralFalse 11653 }; 11654 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant)) 11655 return BL->getValue() ? 
ConstantValueKind::LiteralTrue
11656                          : ConstantValueKind::LiteralFalse;
11657   return ConstantValueKind::Miscellaneous;
11658 }
11659
11660 static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
11661                                         Expr *Constant, Expr *Other,
11662                                         const llvm::APSInt &Value,
11663                                         bool RhsConstant) {
11664   if (S.inTemplateInstantiation())
11665     return false;
11666
11667   Expr *OriginalOther = Other;
11668
11669   Constant = Constant->IgnoreParenImpCasts();
11670   Other = Other->IgnoreParenImpCasts();
11671
11672   // Suppress warnings on tautological comparisons between values of the same
11673   // enumeration type. There are only two ways we could warn on this:
11674   //  - If the constant is outside the range of representable values of
11675   //    the enumeration. In such a case, we should warn about the cast
11676   //    to enumeration type, not about the comparison.
11677   //  - If the constant is the maximum / minimum in-range value. For an
11678   //    enumeration type, such comparisons can be meaningful and useful.
11679   if (Constant->getType()->isEnumeralType() &&
11680       S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
11681     return false;
11682
11683   IntRange OtherValueRange = GetExprRange(
11684       S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
11685
11686   QualType OtherT = Other->getType();
11687   if (const auto *AT = OtherT->getAs<AtomicType>())
11688     OtherT = AT->getValueType();
11689   IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);
11690
11691   // Special case for ObjC BOOL on targets where it's a typedef for a signed char
11692   // (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
11693   bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
11694                               S.NSAPIObj->isObjCBOOLType(OtherT) &&
11695                               OtherT->isSpecificBuiltinType(BuiltinType::SChar);
11696
11697   // Whether we're treating Other as being a bool because of the form of
11698   // expression despite it having another type (typically 'int' in C).
11699   bool OtherIsBooleanDespiteType =
11700       !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
11701   if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
11702     OtherTypeRange = OtherValueRange = IntRange::forBoolType();
11703
11704   // Check if all values in the range of possible values of this expression
11705   // lead to the same comparison outcome.
11706   PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
11707                                         Value.isUnsigned());
11708   auto Cmp = OtherPromotedValueRange.compare(Value);
11709   auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
11710   if (!Result)
11711     return false;
11712
11713   // Also consider the range determined by the type alone. This allows us to
11714   // classify the warning under the proper diagnostic group.
11715   bool TautologicalTypeCompare = false;
11716   {
11717     PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
11718                                          Value.isUnsigned());
11719     auto TypeCmp = OtherPromotedTypeRange.compare(Value);
11720     if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
11721                                                        RhsConstant)) {
11722       TautologicalTypeCompare = true;
11723       Cmp = TypeCmp;
11724       Result = TypeResult;
11725     }
11726   }
11727
11728   // Don't warn if the non-constant operand actually always evaluates to the
11729   // same value.
11730   if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
11731     return false;
11732
11733   // Suppress the diagnostic for an in-range comparison if the constant comes
11734   // from a macro or enumerator.
We don't want to diagnose 11735 // 11736 // some_long_value <= INT_MAX 11737 // 11738 // when sizeof(int) == sizeof(long). 11739 bool InRange = Cmp & PromotedRange::InRangeFlag; 11740 if (InRange && IsEnumConstOrFromMacro(S, Constant)) 11741 return false; 11742 11743 // A comparison of an unsigned bit-field against 0 is really a type problem, 11744 // even though at the type level the bit-field might promote to 'signed int'. 11745 if (Other->refersToBitField() && InRange && Value == 0 && 11746 Other->getType()->isUnsignedIntegerOrEnumerationType()) 11747 TautologicalTypeCompare = true; 11748 11749 // If this is a comparison to an enum constant, include that 11750 // constant in the diagnostic. 11751 const EnumConstantDecl *ED = nullptr; 11752 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) 11753 ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); 11754 11755 // Should be enough for uint128 (39 decimal digits) 11756 SmallString<64> PrettySourceValue; 11757 llvm::raw_svector_ostream OS(PrettySourceValue); 11758 if (ED) { 11759 OS << '\'' << *ED << "' (" << Value << ")"; 11760 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>( 11761 Constant->IgnoreParenImpCasts())) { 11762 OS << (BL->getValue() ? "YES" : "NO"); 11763 } else { 11764 OS << Value; 11765 } 11766 11767 if (!TautologicalTypeCompare) { 11768 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range) 11769 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative 11770 << E->getOpcodeStr() << OS.str() << *Result 11771 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11772 return true; 11773 } 11774 11775 if (IsObjCSignedCharBool) { 11776 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11777 S.PDiag(diag::warn_tautological_compare_objc_bool) 11778 << OS.str() << *Result); 11779 return true; 11780 } 11781 11782 // FIXME: We use a somewhat different formatting for the in-range cases and 11783 // cases involving boolean values for historical reasons. We should pick a 11784 // consistent way of presenting these diagnostics. 11785 if (!InRange || Other->isKnownToHaveBooleanValue()) { 11786 11787 S.DiagRuntimeBehavior( 11788 E->getOperatorLoc(), E, 11789 S.PDiag(!InRange ? diag::warn_out_of_range_compare 11790 : diag::warn_tautological_bool_compare) 11791 << OS.str() << classifyConstantValue(Constant) << OtherT 11792 << OtherIsBooleanDespiteType << *Result 11793 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); 11794 } else { 11795 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy; 11796 unsigned Diag = 11797 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) 11798 ? (HasEnumType(OriginalOther) 11799 ? diag::warn_unsigned_enum_always_true_comparison 11800 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison 11801 : diag::warn_unsigned_always_true_comparison) 11802 : diag::warn_tautological_constant_compare; 11803 11804 S.Diag(E->getOperatorLoc(), Diag) 11805 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result 11806 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); 11807 } 11808 11809 return true; 11810 } 11811 11812 /// Analyze the operands of the given comparison. Implements the 11813 /// fallback case from AnalyzeComparison. 11814 static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { 11815 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 11816 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 11817 } 11818 11819 /// Implements -Wsign-compare. 
11820 /// 11821 /// \param E the binary operator to check for warnings 11822 static void AnalyzeComparison(Sema &S, BinaryOperator *E) { 11823 // The type the comparison is being performed in. 11824 QualType T = E->getLHS()->getType(); 11825 11826 // Only analyze comparison operators where both sides have been converted to 11827 // the same type. 11828 if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) 11829 return AnalyzeImpConvsInComparison(S, E); 11830 11831 // Don't analyze value-dependent comparisons directly. 11832 if (E->isValueDependent()) 11833 return AnalyzeImpConvsInComparison(S, E); 11834 11835 Expr *LHS = E->getLHS(); 11836 Expr *RHS = E->getRHS(); 11837 11838 if (T->isIntegralType(S.Context)) { 11839 Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context); 11840 Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context); 11841 11842 // We don't care about expressions whose result is a constant. 11843 if (RHSValue && LHSValue) 11844 return AnalyzeImpConvsInComparison(S, E); 11845 11846 // We only care about expressions where just one side is literal 11847 if ((bool)RHSValue ^ (bool)LHSValue) { 11848 // Is the constant on the RHS or LHS? 11849 const bool RhsConstant = (bool)RHSValue; 11850 Expr *Const = RhsConstant ? RHS : LHS; 11851 Expr *Other = RhsConstant ? LHS : RHS; 11852 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue; 11853 11854 // Check whether an integer constant comparison results in a value 11855 // of 'true' or 'false'. 11856 if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant)) 11857 return AnalyzeImpConvsInComparison(S, E); 11858 } 11859 } 11860 11861 if (!T->hasUnsignedIntegerRepresentation()) { 11862 // We don't do anything special if this isn't an unsigned integral 11863 // comparison: we're only interested in integral comparisons, and 11864 // signed comparisons only happen in cases we don't care to warn about. 11865 return AnalyzeImpConvsInComparison(S, E); 11866 } 11867 11868 LHS = LHS->IgnoreParenImpCasts(); 11869 RHS = RHS->IgnoreParenImpCasts(); 11870 11871 if (!S.getLangOpts().CPlusPlus) { 11872 // Avoid warning about comparison of integers with different signs when 11873 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of 11874 // the type of `E`. 11875 if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType())) 11876 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11877 if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType())) 11878 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts(); 11879 } 11880 11881 // Check to see if one of the (unmodified) operands is of different 11882 // signedness. 11883 Expr *signedOperand, *unsignedOperand; 11884 if (LHS->getType()->hasSignedIntegerRepresentation()) { 11885 assert(!RHS->getType()->hasSignedIntegerRepresentation() && 11886 "unsigned comparison between two signed integer expressions?"); 11887 signedOperand = LHS; 11888 unsignedOperand = RHS; 11889 } else if (RHS->getType()->hasSignedIntegerRepresentation()) { 11890 signedOperand = RHS; 11891 unsignedOperand = LHS; 11892 } else { 11893 return AnalyzeImpConvsInComparison(S, E); 11894 } 11895 11896 // Otherwise, calculate the effective range of the signed operand. 11897 IntRange signedRange = GetExprRange( 11898 S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true); 11899 11900 // Go ahead and analyze implicit conversions in the operands. Note 11901 // that we skip the implicit conversions on both sides. 
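  // Illustrative example (hypothetical user code): 'int i; unsigned u; i < u'
  // triggers -Wsign-compare below unless 'i' is provably non-negative, while an
  // equality test against a small unsigned constant such as 'i == 0u' is
  // exempted by the width check in the equality case.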
11902 AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); 11903 AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); 11904 11905 // If the signed range is non-negative, -Wsign-compare won't fire. 11906 if (signedRange.NonNegative) 11907 return; 11908 11909 // For (in)equality comparisons, if the unsigned operand is a 11910 // constant which cannot collide with a overflowed signed operand, 11911 // then reinterpreting the signed operand as unsigned will not 11912 // change the result of the comparison. 11913 if (E->isEqualityOp()) { 11914 unsigned comparisonWidth = S.Context.getIntWidth(T); 11915 IntRange unsignedRange = 11916 GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(), 11917 /*Approximate*/ true); 11918 11919 // We should never be unable to prove that the unsigned operand is 11920 // non-negative. 11921 assert(unsignedRange.NonNegative && "unsigned range includes negative?"); 11922 11923 if (unsignedRange.Width < comparisonWidth) 11924 return; 11925 } 11926 11927 S.DiagRuntimeBehavior(E->getOperatorLoc(), E, 11928 S.PDiag(diag::warn_mixed_sign_comparison) 11929 << LHS->getType() << RHS->getType() 11930 << LHS->getSourceRange() << RHS->getSourceRange()); 11931 } 11932 11933 /// Analyzes an attempt to assign the given value to a bitfield. 11934 /// 11935 /// Returns true if there was something fishy about the attempt. 11936 static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, 11937 SourceLocation InitLoc) { 11938 assert(Bitfield->isBitField()); 11939 if (Bitfield->isInvalidDecl()) 11940 return false; 11941 11942 // White-list bool bitfields. 11943 QualType BitfieldType = Bitfield->getType(); 11944 if (BitfieldType->isBooleanType()) 11945 return false; 11946 11947 if (BitfieldType->isEnumeralType()) { 11948 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl(); 11949 // If the underlying enum type was not explicitly specified as an unsigned 11950 // type and the enum contain only positive values, MSVC++ will cause an 11951 // inconsistency by storing this as a signed type. 11952 if (S.getLangOpts().CPlusPlus11 && 11953 !BitfieldEnumDecl->getIntegerTypeSourceInfo() && 11954 BitfieldEnumDecl->getNumPositiveBits() > 0 && 11955 BitfieldEnumDecl->getNumNegativeBits() == 0) { 11956 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield) 11957 << BitfieldEnumDecl; 11958 } 11959 } 11960 11961 if (Bitfield->getType()->isBooleanType()) 11962 return false; 11963 11964 // Ignore value- or type-dependent expressions. 11965 if (Bitfield->getBitWidth()->isValueDependent() || 11966 Bitfield->getBitWidth()->isTypeDependent() || 11967 Init->isValueDependent() || 11968 Init->isTypeDependent()) 11969 return false; 11970 11971 Expr *OriginalInit = Init->IgnoreParenImpCasts(); 11972 unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); 11973 11974 Expr::EvalResult Result; 11975 if (!OriginalInit->EvaluateAsInt(Result, S.Context, 11976 Expr::SE_AllowSideEffects)) { 11977 // The RHS is not constant. If the RHS has an enum type, make sure the 11978 // bitfield is wide enough to hold all the values of the enum without 11979 // truncation. 11980 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) { 11981 EnumDecl *ED = EnumTy->getDecl(); 11982 bool SignedBitfield = BitfieldType->isSignedIntegerType(); 11983 11984 // Enum types are implicitly signed on Windows, so check if there are any 11985 // negative enumerators to see if the enum was intended to be signed or 11986 // not. 
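      // Illustrative example (hypothetical user code): with
      // 'enum E { A, B, C, D }' (two positive bits, no negative values)
      // assigned into a signed 'int bf : 2' bit-field, the largest enumerator D
      // no longer fits once a sign bit is reserved, so the code below emits a
      // signedness-conversion warning plus a note about adjusting the
      // signedness.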
11987 bool SignedEnum = ED->getNumNegativeBits() > 0; 11988 11989 // Check for surprising sign changes when assigning enum values to a 11990 // bitfield of different signedness. If the bitfield is signed and we 11991 // have exactly the right number of bits to store this unsigned enum, 11992 // suggest changing the enum to an unsigned type. This typically happens 11993 // on Windows where unfixed enums always use an underlying type of 'int'. 11994 unsigned DiagID = 0; 11995 if (SignedEnum && !SignedBitfield) { 11996 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum; 11997 } else if (SignedBitfield && !SignedEnum && 11998 ED->getNumPositiveBits() == FieldWidth) { 11999 DiagID = diag::warn_signed_bitfield_enum_conversion; 12000 } 12001 12002 if (DiagID) { 12003 S.Diag(InitLoc, DiagID) << Bitfield << ED; 12004 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo(); 12005 SourceRange TypeRange = 12006 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange(); 12007 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign) 12008 << SignedEnum << TypeRange; 12009 } 12010 12011 // Compute the required bitwidth. If the enum has negative values, we need 12012 // one more bit than the normal number of positive bits to represent the 12013 // sign bit. 12014 unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1, 12015 ED->getNumNegativeBits()) 12016 : ED->getNumPositiveBits(); 12017 12018 // Check the bitwidth. 12019 if (BitsNeeded > FieldWidth) { 12020 Expr *WidthExpr = Bitfield->getBitWidth(); 12021 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum) 12022 << Bitfield << ED; 12023 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield) 12024 << BitsNeeded << ED << WidthExpr->getSourceRange(); 12025 } 12026 } 12027 12028 return false; 12029 } 12030 12031 llvm::APSInt Value = Result.Val.getInt(); 12032 12033 unsigned OriginalWidth = Value.getBitWidth(); 12034 12035 if (!Value.isSigned() || Value.isNegative()) 12036 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit)) 12037 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not) 12038 OriginalWidth = Value.getMinSignedBits(); 12039 12040 if (OriginalWidth <= FieldWidth) 12041 return false; 12042 12043 // Compute the value which the bitfield will contain. 12044 llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); 12045 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType()); 12046 12047 // Check whether the stored value is equal to the original value. 12048 TruncatedValue = TruncatedValue.extend(OriginalWidth); 12049 if (llvm::APSInt::isSameValue(Value, TruncatedValue)) 12050 return false; 12051 12052 // Special-case bitfields of width 1: booleans are naturally 0/1, and 12053 // therefore don't strictly fit into a signed bitfield of width 1. 12054 if (FieldWidth == 1 && Value == 1) 12055 return false; 12056 12057 std::string PrettyValue = toString(Value, 10); 12058 std::string PrettyTrunc = toString(TruncatedValue, 10); 12059 12060 S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) 12061 << PrettyValue << PrettyTrunc << OriginalInit->getType() 12062 << Init->getSourceRange(); 12063 12064 return true; 12065 } 12066 12067 /// Analyze the given simple or compound assignment for warning-worthy 12068 /// operations. 12069 static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { 12070 // Just recurse on the LHS. 
12071 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12072 12073 // We want to recurse on the RHS as normal unless we're assigning to 12074 // a bitfield. 12075 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 12076 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 12077 E->getOperatorLoc())) { 12078 // Recurse, ignoring any implicit conversions on the RHS. 12079 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 12080 E->getOperatorLoc()); 12081 } 12082 } 12083 12084 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12085 12086 // Diagnose implicitly sequentially-consistent atomic assignment. 12087 if (E->getLHS()->getType()->isAtomicType()) 12088 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 12089 } 12090 12091 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12092 static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 12093 SourceLocation CContext, unsigned diag, 12094 bool pruneControlFlow = false) { 12095 if (pruneControlFlow) { 12096 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12097 S.PDiag(diag) 12098 << SourceType << T << E->getSourceRange() 12099 << SourceRange(CContext)); 12100 return; 12101 } 12102 S.Diag(E->getExprLoc(), diag) 12103 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 12104 } 12105 12106 /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 12107 static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 12108 SourceLocation CContext, 12109 unsigned diag, bool pruneControlFlow = false) { 12110 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 12111 } 12112 12113 static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 12114 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 12115 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 12116 } 12117 12118 static void adornObjCBoolConversionDiagWithTernaryFixit( 12119 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 12120 Expr *Ignored = SourceExpr->IgnoreImplicit(); 12121 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 12122 Ignored = OVE->getSourceExpr(); 12123 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 12124 isa<BinaryOperator>(Ignored) || 12125 isa<CXXOperatorCallExpr>(Ignored); 12126 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 12127 if (NeedsParens) 12128 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 12129 << FixItHint::CreateInsertion(EndLoc, ")"); 12130 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 12131 } 12132 12133 /// Diagnose an implicit cast from a floating point value to an integer value. 
12134 static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T, 12135 SourceLocation CContext) { 12136 const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool); 12137 const bool PruneWarnings = S.inTemplateInstantiation(); 12138 12139 Expr *InnerE = E->IgnoreParenImpCasts(); 12140 // We also want to warn on, e.g., "int i = -1.234" 12141 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) 12142 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) 12143 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); 12144 12145 const bool IsLiteral = 12146 isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE); 12147 12148 llvm::APFloat Value(0.0); 12149 bool IsConstant = 12150 E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); 12151 if (!IsConstant) { 12152 if (isObjCSignedCharBool(S, T)) { 12153 return adornObjCBoolConversionDiagWithTernaryFixit( 12154 S, E, 12155 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool) 12156 << E->getType()); 12157 } 12158 12159 return DiagnoseImpCast(S, E, T, CContext, 12160 diag::warn_impcast_float_integer, PruneWarnings); 12161 } 12162 12163 bool isExact = false; 12164 12165 llvm::APSInt IntegerValue(S.Context.getIntWidth(T), 12166 T->hasUnsignedIntegerRepresentation()); 12167 llvm::APFloat::opStatus Result = Value.convertToInteger( 12168 IntegerValue, llvm::APFloat::rmTowardZero, &isExact); 12169 12170 // FIXME: Force the precision of the source value down so we don't print 12171 // digits which are usually useless (we don't really care here if we 12172 // truncate a digit by accident in edge cases). Ideally, APFloat::toString 12173 // would automatically print the shortest representation, but it's a bit 12174 // tricky to implement. 12175 SmallString<16> PrettySourceValue; 12176 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics()); 12177 precision = (precision * 59 + 195) / 196; 12178 Value.toString(PrettySourceValue, precision); 12179 12180 if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) { 12181 return adornObjCBoolConversionDiagWithTernaryFixit( 12182 S, E, 12183 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool) 12184 << PrettySourceValue); 12185 } 12186 12187 if (Result == llvm::APFloat::opOK && isExact) { 12188 if (IsLiteral) return; 12189 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, 12190 PruneWarnings); 12191 } 12192 12193 // Conversion of a floating-point value to a non-bool integer where the 12194 // integral part cannot be represented by the integer type is undefined. 12195 if (!IsBool && Result == llvm::APFloat::opInvalidOp) 12196 return DiagnoseImpCast( 12197 S, E, T, CContext, 12198 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range 12199 : diag::warn_impcast_float_to_integer_out_of_range, 12200 PruneWarnings); 12201 12202 unsigned DiagID = 0; 12203 if (IsLiteral) { 12204 // Warn on floating point literal to integer. 12205 DiagID = diag::warn_impcast_literal_float_to_integer; 12206 } else if (IntegerValue == 0) { 12207 if (Value.isZero()) { // Skip -0.0 to 0 conversion. 12208 return DiagnoseImpCast(S, E, T, CContext, 12209 diag::warn_impcast_float_integer, PruneWarnings); 12210 } 12211 // Warn on non-zero to zero conversion. 
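// e.g. (illustrative, not from the original source) a non-literal constant
// whose truncated integral part is zero:
//
//   constexpr double Ratio = 0.75;
//   int Whole = Ratio;  // stored value becomes 0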
12212 DiagID = diag::warn_impcast_float_to_integer_zero; 12213 } else { 12214 if (IntegerValue.isUnsigned()) { 12215 if (!IntegerValue.isMaxValue()) { 12216 return DiagnoseImpCast(S, E, T, CContext, 12217 diag::warn_impcast_float_integer, PruneWarnings); 12218 } 12219 } else { // IntegerValue.isSigned() 12220 if (!IntegerValue.isMaxSignedValue() && 12221 !IntegerValue.isMinSignedValue()) { 12222 return DiagnoseImpCast(S, E, T, CContext, 12223 diag::warn_impcast_float_integer, PruneWarnings); 12224 } 12225 } 12226 // Warn on evaluatable floating point expression to integer conversion. 12227 DiagID = diag::warn_impcast_float_to_integer; 12228 } 12229 12230 SmallString<16> PrettyTargetValue; 12231 if (IsBool) 12232 PrettyTargetValue = Value.isZero() ? "false" : "true"; 12233 else 12234 IntegerValue.toString(PrettyTargetValue); 12235 12236 if (PruneWarnings) { 12237 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12238 S.PDiag(DiagID) 12239 << E->getType() << T.getUnqualifiedType() 12240 << PrettySourceValue << PrettyTargetValue 12241 << E->getSourceRange() << SourceRange(CContext)); 12242 } else { 12243 S.Diag(E->getExprLoc(), DiagID) 12244 << E->getType() << T.getUnqualifiedType() << PrettySourceValue 12245 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext); 12246 } 12247 } 12248 12249 /// Analyze the given compound assignment for the possible losing of 12250 /// floating-point precision. 12251 static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) { 12252 assert(isa<CompoundAssignOperator>(E) && 12253 "Must be compound assignment operation"); 12254 // Recurse on the LHS and RHS in here 12255 AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); 12256 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 12257 12258 if (E->getLHS()->getType()->isAtomicType()) 12259 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst); 12260 12261 // Now check the outermost expression 12262 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>(); 12263 const auto *RBT = cast<CompoundAssignOperator>(E) 12264 ->getComputationResultType() 12265 ->getAs<BuiltinType>(); 12266 12267 // The below checks assume source is floating point. 12268 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; 12269 12270 // If source is floating point but target is an integer. 12271 if (ResultBT->isInteger()) 12272 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(), 12273 E->getExprLoc(), diag::warn_impcast_float_integer); 12274 12275 if (!ResultBT->isFloatingPoint()) 12276 return; 12277 12278 // If both source and target are floating points, warn about losing precision. 12279 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12280 QualType(ResultBT, 0), QualType(RBT, 0)); 12281 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc())) 12282 // warn about dropping FP rank. 
12283 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(), 12284 diag::warn_impcast_float_result_precision); 12285 } 12286 12287 static std::string PrettyPrintInRange(const llvm::APSInt &Value, 12288 IntRange Range) { 12289 if (!Range.Width) return "0"; 12290 12291 llvm::APSInt ValueInRange = Value; 12292 ValueInRange.setIsSigned(!Range.NonNegative); 12293 ValueInRange = ValueInRange.trunc(Range.Width); 12294 return toString(ValueInRange, 10); 12295 } 12296 12297 static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { 12298 if (!isa<ImplicitCastExpr>(Ex)) 12299 return false; 12300 12301 Expr *InnerE = Ex->IgnoreParenImpCasts(); 12302 const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); 12303 const Type *Source = 12304 S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); 12305 if (Target->isDependentType()) 12306 return false; 12307 12308 const BuiltinType *FloatCandidateBT = 12309 dyn_cast<BuiltinType>(ToBool ? Source : Target); 12310 const Type *BoolCandidateType = ToBool ? Target : Source; 12311 12312 return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && 12313 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); 12314 } 12315 12316 static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, 12317 SourceLocation CC) { 12318 unsigned NumArgs = TheCall->getNumArgs(); 12319 for (unsigned i = 0; i < NumArgs; ++i) { 12320 Expr *CurrA = TheCall->getArg(i); 12321 if (!IsImplicitBoolFloatConversion(S, CurrA, true)) 12322 continue; 12323 12324 bool IsSwapped = ((i > 0) && 12325 IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); 12326 IsSwapped |= ((i < (NumArgs - 1)) && 12327 IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); 12328 if (IsSwapped) { 12329 // Warn on this floating-point to bool conversion. 12330 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), 12331 CurrA->getType(), CC, 12332 diag::warn_impcast_floating_point_to_bool); 12333 } 12334 } 12335 } 12336 12337 static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T, 12338 SourceLocation CC) { 12339 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer, 12340 E->getExprLoc())) 12341 return; 12342 12343 // Don't warn on functions which have return type nullptr_t. 12344 if (isa<CallExpr>(E)) 12345 return; 12346 12347 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr). 12348 const Expr::NullPointerConstantKind NullKind = 12349 E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); 12350 if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) 12351 return; 12352 12353 // Return if target type is a safe conversion. 12354 if (T->isAnyPointerType() || T->isBlockPointerType() || 12355 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) 12356 return; 12357 12358 SourceLocation Loc = E->getSourceRange().getBegin(); 12359 12360 // Venture through the macro stacks to get to the source of macro arguments. 12361 // The new location is a better location than the complete location that was 12362 // passed in. 12363 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc); 12364 CC = S.SourceMgr.getTopMacroCallerLoc(CC); 12365 12366 // __null is usually wrapped in a macro. Go up a macro if that is the case. 
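// e.g. (illustrative, not from the original source), in C++ where NULL
// expands to __null:
//
//   int Fd = NULL;  // NULL used where a plain integer was probably intended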
12367 if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) { 12368 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics( 12369 Loc, S.SourceMgr, S.getLangOpts()); 12370 if (MacroName == "NULL") 12371 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin(); 12372 } 12373 12374 // Only warn if the null and context location are in the same macro expansion. 12375 if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) 12376 return; 12377 12378 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) 12379 << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC) 12380 << FixItHint::CreateReplacement(Loc, 12381 S.getFixItZeroLiteralForType(T, Loc)); 12382 } 12383 12384 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12385 ObjCArrayLiteral *ArrayLiteral); 12386 12387 static void 12388 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12389 ObjCDictionaryLiteral *DictionaryLiteral); 12390 12391 /// Check a single element within a collection literal against the 12392 /// target element type. 12393 static void checkObjCCollectionLiteralElement(Sema &S, 12394 QualType TargetElementType, 12395 Expr *Element, 12396 unsigned ElementKind) { 12397 // Skip a bitcast to 'id' or qualified 'id'. 12398 if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { 12399 if (ICE->getCastKind() == CK_BitCast && 12400 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) 12401 Element = ICE->getSubExpr(); 12402 } 12403 12404 QualType ElementType = Element->getType(); 12405 ExprResult ElementResult(Element); 12406 if (ElementType->getAs<ObjCObjectPointerType>() && 12407 S.CheckSingleAssignmentConstraints(TargetElementType, 12408 ElementResult, 12409 false, false) 12410 != Sema::Compatible) { 12411 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) 12412 << ElementType << ElementKind << TargetElementType 12413 << Element->getSourceRange(); 12414 } 12415 12416 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) 12417 checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); 12418 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) 12419 checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); 12420 } 12421 12422 /// Check an Objective-C array literal being converted to the given 12423 /// target type. 12424 static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 12425 ObjCArrayLiteral *ArrayLiteral) { 12426 if (!S.NSArrayDecl) 12427 return; 12428 12429 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12430 if (!TargetObjCPtr) 12431 return; 12432 12433 if (TargetObjCPtr->isUnspecialized() || 12434 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12435 != S.NSArrayDecl->getCanonicalDecl()) 12436 return; 12437 12438 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12439 if (TypeArgs.size() != 1) 12440 return; 12441 12442 QualType TargetElementType = TypeArgs[0]; 12443 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 12444 checkObjCCollectionLiteralElement(S, TargetElementType, 12445 ArrayLiteral->getElement(I), 12446 0); 12447 } 12448 } 12449 12450 /// Check an Objective-C dictionary literal being converted to the given 12451 /// target type. 
12452 static void 12453 checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 12454 ObjCDictionaryLiteral *DictionaryLiteral) { 12455 if (!S.NSDictionaryDecl) 12456 return; 12457 12458 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 12459 if (!TargetObjCPtr) 12460 return; 12461 12462 if (TargetObjCPtr->isUnspecialized() || 12463 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 12464 != S.NSDictionaryDecl->getCanonicalDecl()) 12465 return; 12466 12467 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 12468 if (TypeArgs.size() != 2) 12469 return; 12470 12471 QualType TargetKeyType = TypeArgs[0]; 12472 QualType TargetObjectType = TypeArgs[1]; 12473 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 12474 auto Element = DictionaryLiteral->getKeyValueElement(I); 12475 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 12476 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 12477 } 12478 } 12479 12480 // Helper function to filter out cases for same-width constant conversions. 12481 // Don't warn on char array initialization or for non-decimal values. 12482 static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T, 12483 SourceLocation CC) { 12484 // If initializing from a constant, and the constant starts with '0', 12485 // then it is a binary, octal, or hexadecimal. Allow these constants 12486 // to fill all the bits, even if there is a sign change. 12487 if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) { 12488 const char FirstLiteralCharacter = 12489 S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0]; 12490 if (FirstLiteralCharacter == '0') 12491 return false; 12492 } 12493 12494 // If the CC location points to a '{', and the type is char, then assume 12495 // it is an array initialization. 12496 if (CC.isValid() && T->isCharType()) { 12497 const char FirstContextCharacter = 12498 S.getSourceManager().getCharacterData(CC)[0]; 12499 if (FirstContextCharacter == '{') 12500 return false; 12501 } 12502 12503 return true; 12504 } 12505 12506 static const IntegerLiteral *getIntegerLiteral(Expr *E) { 12507 const auto *IL = dyn_cast<IntegerLiteral>(E); 12508 if (!IL) { 12509 if (auto *UO = dyn_cast<UnaryOperator>(E)) { 12510 if (UO->getOpcode() == UO_Minus) 12511 return dyn_cast<IntegerLiteral>(UO->getSubExpr()); 12512 } 12513 } 12514 12515 return IL; 12516 } 12517 12518 static void DiagnoseIntInBoolContext(Sema &S, Expr *E) { 12519 E = E->IgnoreParenImpCasts(); 12520 SourceLocation ExprLoc = E->getExprLoc(); 12521 12522 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12523 BinaryOperator::Opcode Opc = BO->getOpcode(); 12524 Expr::EvalResult Result; 12525 // Do not diagnose unsigned shifts.
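// e.g. (illustrative, not from the original source):
//
//   void use(int Flags) {
//     if (Flags << 4) { }  // result of '<<' used directly in a boolean context
//   }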
12526 if (Opc == BO_Shl) { 12527 const auto *LHS = getIntegerLiteral(BO->getLHS()); 12528 const auto *RHS = getIntegerLiteral(BO->getRHS()); 12529 if (LHS && LHS->getValue() == 0) 12530 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 12531 else if (!E->isValueDependent() && LHS && RHS && 12532 RHS->getValue().isNonNegative() && 12533 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 12534 S.Diag(ExprLoc, diag::warn_left_shift_always) 12535 << (Result.Val.getInt() != 0); 12536 else if (E->getType()->isSignedIntegerType()) 12537 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 12538 } 12539 } 12540 12541 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12542 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 12543 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 12544 if (!LHS || !RHS) 12545 return; 12546 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 12547 (RHS->getValue() == 0 || RHS->getValue() == 1)) 12548 // Do not diagnose common idioms. 12549 return; 12550 if (LHS->getValue() != 0 && RHS->getValue() != 0) 12551 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 12552 } 12553 } 12554 12555 static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 12556 SourceLocation CC, 12557 bool *ICContext = nullptr, 12558 bool IsListInit = false) { 12559 if (E->isTypeDependent() || E->isValueDependent()) return; 12560 12561 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 12562 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 12563 if (Source == Target) return; 12564 if (Target->isDependentType()) return; 12565 12566 // If the conversion context location is invalid, don't complain. We also 12567 // don't want to emit a warning if the issue occurs from the expansion of 12568 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 12569 // delay this check as long as possible. Once we detect we are in that 12570 // scenario, we just return. 12571 if (CC.isInvalid()) 12572 return; 12573 12574 if (Source->isAtomicType()) 12575 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 12576 12577 // Diagnose implicit casts to bool. 12578 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 12579 if (isa<StringLiteral>(E)) 12580 // Warn on string literal to bool. Checks for string literals in logical 12581 // AND expressions, for instance, assert(0 && "error here"), are 12582 // prevented by a check in AnalyzeImplicitConversions(). 12583 return DiagnoseImpCast(S, E, T, CC, 12584 diag::warn_impcast_string_literal_to_bool); 12585 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 12586 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 12587 // This covers the literal expressions that evaluate to Objective-C 12588 // objects. 12589 return DiagnoseImpCast(S, E, T, CC, 12590 diag::warn_impcast_objective_c_literal_to_bool); 12591 } 12592 if (Source->isPointerType() || Source->canDecayToPointerType()) { 12593 // Warn on pointer to bool conversion that is always true. 12594 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 12595 SourceRange(CC)); 12596 } 12597 } 12598 12599 // If we're converting a constant to an ObjC BOOL on a platform where BOOL 12600 // is a typedef for signed char (macOS), then that constant value has to be 1 12601 // or 0.
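// e.g. (illustrative, not from the original source), in Objective-C on such a
// platform:
//
//   BOOL Flag = 2;  // neither YES (1) nor NO (0)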
12602 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 12603 Expr::EvalResult Result; 12604 if (E->EvaluateAsInt(Result, S.getASTContext(), 12605 Expr::SE_AllowSideEffects)) { 12606 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 12607 adornObjCBoolConversionDiagWithTernaryFixit( 12608 S, E, 12609 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 12610 << toString(Result.Val.getInt(), 10)); 12611 } 12612 return; 12613 } 12614 } 12615 12616 // Check implicit casts from Objective-C collection literals to specialized 12617 // collection types, e.g., NSArray<NSString *> *. 12618 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 12619 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 12620 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 12621 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 12622 12623 // Strip vector types. 12624 if (isa<VectorType>(Source)) { 12625 if (Target->isVLSTBuiltinType() && 12626 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 12627 QualType(Source, 0)) || 12628 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 12629 QualType(Source, 0)))) 12630 return; 12631 12632 if (!isa<VectorType>(Target)) { 12633 if (S.SourceMgr.isInSystemMacro(CC)) 12634 return; 12635 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 12636 } 12637 12638 // If the vector cast is cast between two vectors of the same size, it is 12639 // a bitcast, not a conversion. 12640 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 12641 return; 12642 12643 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 12644 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 12645 } 12646 if (auto VecTy = dyn_cast<VectorType>(Target)) 12647 Target = VecTy->getElementType().getTypePtr(); 12648 12649 // Strip complex types. 12650 if (isa<ComplexType>(Source)) { 12651 if (!isa<ComplexType>(Target)) { 12652 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 12653 return; 12654 12655 return DiagnoseImpCast(S, E, T, CC, 12656 S.getLangOpts().CPlusPlus 12657 ? diag::err_impcast_complex_scalar 12658 : diag::warn_impcast_complex_scalar); 12659 } 12660 12661 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 12662 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 12663 } 12664 12665 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 12666 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 12667 12668 // If the source is floating point... 12669 if (SourceBT && SourceBT->isFloatingPoint()) { 12670 // ...and the target is floating point... 12671 if (TargetBT && TargetBT->isFloatingPoint()) { 12672 // ...then warn if we're dropping FP rank. 12673 12674 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 12675 QualType(SourceBT, 0), QualType(TargetBT, 0)); 12676 if (Order > 0) { 12677 // Don't warn about float constants that are precisely 12678 // representable in the target type. 12679 Expr::EvalResult result; 12680 if (E->EvaluateAsRValue(result, S.Context)) { 12681 // Value might be a float, a float vector, or a float complex. 12682 if (IsSameFloatAfterCast(result.Val, 12683 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 12684 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 12685 return; 12686 } 12687 12688 if (S.SourceMgr.isInSystemMacro(CC)) 12689 return; 12690 12691 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 12692 } 12693 // ... 
or possibly if we're increasing rank, too 12694 else if (Order < 0) { 12695 if (S.SourceMgr.isInSystemMacro(CC)) 12696 return; 12697 12698 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 12699 } 12700 return; 12701 } 12702 12703 // If the target is integral, always warn. 12704 if (TargetBT && TargetBT->isInteger()) { 12705 if (S.SourceMgr.isInSystemMacro(CC)) 12706 return; 12707 12708 DiagnoseFloatingImpCast(S, E, T, CC); 12709 } 12710 12711 // Detect the case where a call result is converted from floating-point to 12712 // to bool, and the final argument to the call is converted from bool, to 12713 // discover this typo: 12714 // 12715 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 12716 // 12717 // FIXME: This is an incredibly special case; is there some more general 12718 // way to detect this class of misplaced-parentheses bug? 12719 if (Target->isBooleanType() && isa<CallExpr>(E)) { 12720 // Check last argument of function call to see if it is an 12721 // implicit cast from a type matching the type the result 12722 // is being cast to. 12723 CallExpr *CEx = cast<CallExpr>(E); 12724 if (unsigned NumArgs = CEx->getNumArgs()) { 12725 Expr *LastA = CEx->getArg(NumArgs - 1); 12726 Expr *InnerE = LastA->IgnoreParenImpCasts(); 12727 if (isa<ImplicitCastExpr>(LastA) && 12728 InnerE->getType()->isBooleanType()) { 12729 // Warn on this floating-point to bool conversion 12730 DiagnoseImpCast(S, E, T, CC, 12731 diag::warn_impcast_floating_point_to_bool); 12732 } 12733 } 12734 } 12735 return; 12736 } 12737 12738 // Valid casts involving fixed point types should be accounted for here. 12739 if (Source->isFixedPointType()) { 12740 if (Target->isUnsaturatedFixedPointType()) { 12741 Expr::EvalResult Result; 12742 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 12743 S.isConstantEvaluated())) { 12744 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 12745 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 12746 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 12747 if (Value > MaxVal || Value < MinVal) { 12748 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12749 S.PDiag(diag::warn_impcast_fixed_point_range) 12750 << Value.toString() << T 12751 << E->getSourceRange() 12752 << clang::SourceRange(CC)); 12753 return; 12754 } 12755 } 12756 } else if (Target->isIntegerType()) { 12757 Expr::EvalResult Result; 12758 if (!S.isConstantEvaluated() && 12759 E->EvaluateAsFixedPoint(Result, S.Context, 12760 Expr::SE_AllowSideEffects)) { 12761 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 12762 12763 bool Overflowed; 12764 llvm::APSInt IntResult = FXResult.convertToInt( 12765 S.Context.getIntWidth(T), 12766 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 12767 12768 if (Overflowed) { 12769 S.DiagRuntimeBehavior(E->getExprLoc(), E, 12770 S.PDiag(diag::warn_impcast_fixed_point_range) 12771 << FXResult.toString() << T 12772 << E->getSourceRange() 12773 << clang::SourceRange(CC)); 12774 return; 12775 } 12776 } 12777 } 12778 } else if (Target->isUnsaturatedFixedPointType()) { 12779 if (Source->isIntegerType()) { 12780 Expr::EvalResult Result; 12781 if (!S.isConstantEvaluated() && 12782 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 12783 llvm::APSInt Value = Result.Val.getInt(); 12784 12785 bool Overflowed; 12786 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 12787 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 12788 12789 if (Overflowed) { 12790 
S.DiagRuntimeBehavior(E->getExprLoc(), E, 12791 S.PDiag(diag::warn_impcast_fixed_point_range) 12792 << toString(Value, /*Radix=*/10) << T 12793 << E->getSourceRange() 12794 << clang::SourceRange(CC)); 12795 return; 12796 } 12797 } 12798 } 12799 } 12800 12801 // If we are casting an integer type to a floating point type without 12802 // initialization-list syntax, we might lose accuracy if the floating 12803 // point type has a narrower significand than the integer type. 12804 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 12805 TargetBT->isFloatingType() && !IsListInit) { 12806 // Determine the number of precision bits in the source integer type. 12807 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 12808 /*Approximate*/ true); 12809 unsigned int SourcePrecision = SourceRange.Width; 12810 12811 // Determine the number of precision bits in the 12812 // target floating point type. 12813 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 12814 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12815 12816 if (SourcePrecision > 0 && TargetPrecision > 0 && 12817 SourcePrecision > TargetPrecision) { 12818 12819 if (Optional<llvm::APSInt> SourceInt = 12820 E->getIntegerConstantExpr(S.Context)) { 12821 // If the source integer is a constant, convert it to the target 12822 // floating point type. Issue a warning if the value changes 12823 // during the whole conversion. 12824 llvm::APFloat TargetFloatValue( 12825 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 12826 llvm::APFloat::opStatus ConversionStatus = 12827 TargetFloatValue.convertFromAPInt( 12828 *SourceInt, SourceBT->isSignedInteger(), 12829 llvm::APFloat::rmNearestTiesToEven); 12830 12831 if (ConversionStatus != llvm::APFloat::opOK) { 12832 SmallString<32> PrettySourceValue; 12833 SourceInt->toString(PrettySourceValue, 10); 12834 SmallString<32> PrettyTargetValue; 12835 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 12836 12837 S.DiagRuntimeBehavior( 12838 E->getExprLoc(), E, 12839 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 12840 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12841 << E->getSourceRange() << clang::SourceRange(CC)); 12842 } 12843 } else { 12844 // Otherwise, the implicit conversion may lose precision. 12845 DiagnoseImpCast(S, E, T, CC, 12846 diag::warn_impcast_integer_float_precision); 12847 } 12848 } 12849 } 12850 12851 DiagnoseNullConversion(S, E, T, CC); 12852 12853 S.DiscardMisalignedMemberAddress(Target, E); 12854 12855 if (Target->isBooleanType()) 12856 DiagnoseIntInBoolContext(S, E); 12857 12858 if (!Source->isIntegerType() || !Target->isIntegerType()) 12859 return; 12860 12861 // TODO: remove this early return once the false positives for constant->bool 12862 // in templates, macros, etc, are reduced or removed. 
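// The integer checks below diagnose, for example (illustrative, not from the
// original source):
//
//   void copy(long long Big) {
//     char Small = 300;  // constant changes value from 300 to 44
//     int Narrow = Big;  // 64-bit value truncated to 32 bits on common targets
//   }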
12863 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 12864 return; 12865 12866 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 12867 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 12868 return adornObjCBoolConversionDiagWithTernaryFixit( 12869 S, E, 12870 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 12871 << E->getType()); 12872 } 12873 12874 IntRange SourceTypeRange = 12875 IntRange::forTargetOfCanonicalType(S.Context, Source); 12876 IntRange LikelySourceRange = 12877 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 12878 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 12879 12880 if (LikelySourceRange.Width > TargetRange.Width) { 12881 // If the source is a constant, use a default-on diagnostic. 12882 // TODO: this should happen for bitfield stores, too. 12883 Expr::EvalResult Result; 12884 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 12885 S.isConstantEvaluated())) { 12886 llvm::APSInt Value(32); 12887 Value = Result.Val.getInt(); 12888 12889 if (S.SourceMgr.isInSystemMacro(CC)) 12890 return; 12891 12892 std::string PrettySourceValue = toString(Value, 10); 12893 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12894 12895 S.DiagRuntimeBehavior( 12896 E->getExprLoc(), E, 12897 S.PDiag(diag::warn_impcast_integer_precision_constant) 12898 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12899 << E->getSourceRange() << SourceRange(CC)); 12900 return; 12901 } 12902 12903 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 12904 if (S.SourceMgr.isInSystemMacro(CC)) 12905 return; 12906 12907 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 12908 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 12909 /* pruneControlFlow */ true); 12910 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 12911 } 12912 12913 if (TargetRange.Width > SourceTypeRange.Width) { 12914 if (auto *UO = dyn_cast<UnaryOperator>(E)) 12915 if (UO->getOpcode() == UO_Minus) 12916 if (Source->isUnsignedIntegerType()) { 12917 if (Target->isUnsignedIntegerType()) 12918 return DiagnoseImpCast(S, E, T, CC, 12919 diag::warn_impcast_high_order_zero_bits); 12920 if (Target->isSignedIntegerType()) 12921 return DiagnoseImpCast(S, E, T, CC, 12922 diag::warn_impcast_nonnegative_result); 12923 } 12924 } 12925 12926 if (TargetRange.Width == LikelySourceRange.Width && 12927 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 12928 Source->isSignedIntegerType()) { 12929 // Warn when doing a signed to signed conversion, warn if the positive 12930 // source value is exactly the width of the target type, which will 12931 // cause a negative value to be stored. 12932 12933 Expr::EvalResult Result; 12934 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 12935 !S.SourceMgr.isInSystemMacro(CC)) { 12936 llvm::APSInt Value = Result.Val.getInt(); 12937 if (isSameWidthConstantConversion(S, E, T, CC)) { 12938 std::string PrettySourceValue = toString(Value, 10); 12939 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 12940 12941 S.DiagRuntimeBehavior( 12942 E->getExprLoc(), E, 12943 S.PDiag(diag::warn_impcast_integer_precision_constant) 12944 << PrettySourceValue << PrettyTargetValue << E->getType() << T 12945 << E->getSourceRange() << SourceRange(CC)); 12946 return; 12947 } 12948 } 12949 12950 // Fall through for non-constants to give a sign conversion warning. 
12951 } 12952 12953 if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 12954 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 12955 LikelySourceRange.Width == TargetRange.Width)) { 12956 if (S.SourceMgr.isInSystemMacro(CC)) 12957 return; 12958 12959 unsigned DiagID = diag::warn_impcast_integer_sign; 12960 12961 // Traditionally, gcc has warned about this under -Wsign-compare. 12962 // We also want to warn about it in -Wconversion. 12963 // So if -Wconversion is off, use a completely identical diagnostic 12964 // in the sign-compare group. 12965 // The conditional-checking code will 12966 if (ICContext) { 12967 DiagID = diag::warn_impcast_integer_sign_conditional; 12968 *ICContext = true; 12969 } 12970 12971 return DiagnoseImpCast(S, E, T, CC, DiagID); 12972 } 12973 12974 // Diagnose conversions between different enumeration types. 12975 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 12976 // type, to give us better diagnostics. 12977 QualType SourceType = E->getType(); 12978 if (!S.getLangOpts().CPlusPlus) { 12979 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 12980 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { 12981 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); 12982 SourceType = S.Context.getTypeDeclType(Enum); 12983 Source = S.Context.getCanonicalType(SourceType).getTypePtr(); 12984 } 12985 } 12986 12987 if (const EnumType *SourceEnum = Source->getAs<EnumType>()) 12988 if (const EnumType *TargetEnum = Target->getAs<EnumType>()) 12989 if (SourceEnum->getDecl()->hasNameForLinkage() && 12990 TargetEnum->getDecl()->hasNameForLinkage() && 12991 SourceEnum != TargetEnum) { 12992 if (S.SourceMgr.isInSystemMacro(CC)) 12993 return; 12994 12995 return DiagnoseImpCast(S, E, SourceType, T, CC, 12996 diag::warn_impcast_different_enum_types); 12997 } 12998 } 12999 13000 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13001 SourceLocation CC, QualType T); 13002 13003 static void CheckConditionalOperand(Sema &S, Expr *E, QualType T, 13004 SourceLocation CC, bool &ICContext) { 13005 E = E->IgnoreParenImpCasts(); 13006 13007 if (auto *CO = dyn_cast<AbstractConditionalOperator>(E)) 13008 return CheckConditionalOperator(S, CO, CC, T); 13009 13010 AnalyzeImplicitConversions(S, E, CC); 13011 if (E->getType() != T) 13012 return CheckImplicitConversion(S, E, T, CC, &ICContext); 13013 } 13014 13015 static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E, 13016 SourceLocation CC, QualType T) { 13017 AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); 13018 13019 Expr *TrueExpr = E->getTrueExpr(); 13020 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) 13021 TrueExpr = BCO->getCommon(); 13022 13023 bool Suspicious = false; 13024 CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious); 13025 CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); 13026 13027 if (T->isBooleanType()) 13028 DiagnoseIntInBoolContext(S, E); 13029 13030 // If -Wconversion would have warned about either of the candidates 13031 // for a signedness conversion to the context type... 13032 if (!Suspicious) return; 13033 13034 // ...but it's currently ignored... 13035 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) 13036 return; 13037 13038 // ...then check whether it would have warned about either of the 13039 // candidates for a signedness conversion to the condition type. 
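// e.g. (illustrative, not from the original source):
//
//   unsigned pick(bool UseDefault, unsigned N) {
//     return UseDefault ? -1 : N;  // the '-1' arm changes signedness
//   }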
13040 if (E->getType() == T) return; 13041 13042 Suspicious = false; 13043 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), 13044 E->getType(), CC, &Suspicious); 13045 if (!Suspicious) 13046 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), 13047 E->getType(), CC, &Suspicious); 13048 } 13049 13050 /// Check conversion of given expression to boolean. 13051 /// Input argument E is a logical expression. 13052 static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { 13053 if (S.getLangOpts().Bool) 13054 return; 13055 if (E->IgnoreParenImpCasts()->getType()->isAtomicType()) 13056 return; 13057 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); 13058 } 13059 13060 namespace { 13061 struct AnalyzeImplicitConversionsWorkItem { 13062 Expr *E; 13063 SourceLocation CC; 13064 bool IsListInit; 13065 }; 13066 } 13067 13068 /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions 13069 /// that should be visited are added to WorkList. 13070 static void AnalyzeImplicitConversions( 13071 Sema &S, AnalyzeImplicitConversionsWorkItem Item, 13072 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) { 13073 Expr *OrigE = Item.E; 13074 SourceLocation CC = Item.CC; 13075 13076 QualType T = OrigE->getType(); 13077 Expr *E = OrigE->IgnoreParenImpCasts(); 13078 13079 // Propagate whether we are in a C++ list initialization expression. 13080 // If so, we do not issue warnings for implicit int-float conversion 13081 // precision loss, because C++11 narrowing already handles it. 13082 bool IsListInit = Item.IsListInit || 13083 (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus); 13084 13085 if (E->isTypeDependent() || E->isValueDependent()) 13086 return; 13087 13088 Expr *SourceExpr = E; 13089 // Examine, but don't traverse into the source expression of an 13090 // OpaqueValueExpr, since it may have multiple parents and we don't want to 13091 // emit duplicate diagnostics. Its fine to examine the form or attempt to 13092 // evaluate it in the context of checking the specific conversion to T though. 13093 if (auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 13094 if (auto *Src = OVE->getSourceExpr()) 13095 SourceExpr = Src; 13096 13097 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr)) 13098 if (UO->getOpcode() == UO_Not && 13099 UO->getSubExpr()->isKnownToHaveBooleanValue()) 13100 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool) 13101 << OrigE->getSourceRange() << T->isBooleanType() 13102 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!"); 13103 13104 // For conditional operators, we analyze the arguments as if they 13105 // were being fed directly into the output. 13106 if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) { 13107 CheckConditionalOperator(S, CO, CC, T); 13108 return; 13109 } 13110 13111 // Check implicit argument conversions for function calls. 13112 if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr)) 13113 CheckImplicitArgumentConversions(S, Call, CC); 13114 13115 // Go ahead and check any implicit conversions we might have skipped. 13116 // The non-canonical typecheck is just an optimization; 13117 // CheckImplicitConversion will filter out dead implicit conversions. 13118 if (SourceExpr->getType() != T) 13119 CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit); 13120 13121 // Now continue drilling into this expression. 
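// For reference, the bitwise-negation check above fires on code like this
// (illustrative, not from the original source):
//
//   void poll(bool Done) {
//     if (~Done) { }  // '~' applied to a boolean; '!' was probably intended
//   }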
13122 13123 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { 13124 // The bound subexpressions in a PseudoObjectExpr are not reachable 13125 // as transitive children. 13126 // FIXME: Use a more uniform representation for this. 13127 for (auto *SE : POE->semantics()) 13128 if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE)) 13129 WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit}); 13130 } 13131 13132 // Skip past explicit casts. 13133 if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) { 13134 E = CE->getSubExpr()->IgnoreParenImpCasts(); 13135 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType()) 13136 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13137 WorkList.push_back({E, CC, IsListInit}); 13138 return; 13139 } 13140 13141 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13142 // Do a somewhat different check with comparison operators. 13143 if (BO->isComparisonOp()) 13144 return AnalyzeComparison(S, BO); 13145 13146 // And with simple assignments. 13147 if (BO->getOpcode() == BO_Assign) 13148 return AnalyzeAssignment(S, BO); 13149 // And with compound assignments. 13150 if (BO->isAssignmentOp()) 13151 return AnalyzeCompoundAssignment(S, BO); 13152 } 13153 13154 // These break the otherwise-useful invariant below. Fortunately, 13155 // we don't really need to recurse into them, because any internal 13156 // expressions should have been analyzed already when they were 13157 // built into statements. 13158 if (isa<StmtExpr>(E)) return; 13159 13160 // Don't descend into unevaluated contexts. 13161 if (isa<UnaryExprOrTypeTraitExpr>(E)) return; 13162 13163 // Now just recurse over the expression's children. 13164 CC = E->getExprLoc(); 13165 BinaryOperator *BO = dyn_cast<BinaryOperator>(E); 13166 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd; 13167 for (Stmt *SubStmt : E->children()) { 13168 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt); 13169 if (!ChildExpr) 13170 continue; 13171 13172 if (IsLogicalAndOperator && 13173 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts())) 13174 // Ignore checking string literals that are in logical and operators. 13175 // This is a common pattern for asserts. 13176 continue; 13177 WorkList.push_back({ChildExpr, CC, IsListInit}); 13178 } 13179 13180 if (BO && BO->isLogicalOp()) { 13181 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts(); 13182 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13183 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13184 13185 SubExpr = BO->getRHS()->IgnoreParenImpCasts(); 13186 if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr)) 13187 ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc()); 13188 } 13189 13190 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) { 13191 if (U->getOpcode() == UO_LNot) { 13192 ::CheckBoolLikeConversion(S, U->getSubExpr(), CC); 13193 } else if (U->getOpcode() != UO_AddrOf) { 13194 if (U->getSubExpr()->getType()->isAtomicType()) 13195 S.Diag(U->getSubExpr()->getBeginLoc(), 13196 diag::warn_atomic_implicit_seq_cst); 13197 } 13198 } 13199 } 13200 13201 /// AnalyzeImplicitConversions - Find and report any interesting 13202 /// implicit conversions in the given expression. There are a couple 13203 /// of competing diagnostics here, -Wconversion and -Wsign-compare. 
13204 static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, 13205 bool IsListInit/*= false*/) { 13206 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList; 13207 WorkList.push_back({OrigE, CC, IsListInit}); 13208 while (!WorkList.empty()) 13209 AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList); 13210 } 13211 13212 /// Diagnose integer type and any valid implicit conversion to it. 13213 static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) { 13214 // Taking into account implicit conversions, 13215 // allow any integer. 13216 if (!E->getType()->isIntegerType()) { 13217 S.Diag(E->getBeginLoc(), 13218 diag::err_opencl_enqueue_kernel_invalid_local_size_type); 13219 return true; 13220 } 13221 // Potentially emit standard warnings for implicit conversions if enabled 13222 // using -Wconversion. 13223 CheckImplicitConversion(S, E, IntT, E->getBeginLoc()); 13224 return false; 13225 } 13226 13227 // Helper function for Sema::DiagnoseAlwaysNonNullPointer. 13228 // Returns true when emitting a warning about taking the address of a reference. 13229 static bool CheckForReference(Sema &SemaRef, const Expr *E, 13230 const PartialDiagnostic &PD) { 13231 E = E->IgnoreParenImpCasts(); 13232 13233 const FunctionDecl *FD = nullptr; 13234 13235 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { 13236 if (!DRE->getDecl()->getType()->isReferenceType()) 13237 return false; 13238 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13239 if (!M->getMemberDecl()->getType()->isReferenceType()) 13240 return false; 13241 } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) { 13242 if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType()) 13243 return false; 13244 FD = Call->getDirectCallee(); 13245 } else { 13246 return false; 13247 } 13248 13249 SemaRef.Diag(E->getExprLoc(), PD); 13250 13251 // If possible, point to location of function. 13252 if (FD) { 13253 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD; 13254 } 13255 13256 return true; 13257 } 13258 13259 // Returns true if the SourceLocation is expanded from any macro body. 13260 // Returns false if the SourceLocation is invalid, is from not in a macro 13261 // expansion, or is from expanded from a top-level macro argument. 13262 static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { 13263 if (Loc.isInvalid()) 13264 return false; 13265 13266 while (Loc.isMacroID()) { 13267 if (SM.isMacroBodyExpansion(Loc)) 13268 return true; 13269 Loc = SM.getImmediateMacroCallerLoc(Loc); 13270 } 13271 13272 return false; 13273 } 13274 13275 /// Diagnose pointers that are always non-null. 13276 /// \param E the expression containing the pointer 13277 /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is 13278 /// compared to a null pointer 13279 /// \param IsEqual True when the comparison is equal to a null pointer 13280 /// \param Range Extra SourceRange to highlight in the diagnostic 13281 void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, 13282 Expr::NullPointerConstantKind NullKind, 13283 bool IsEqual, SourceRange Range) { 13284 if (!E) 13285 return; 13286 13287 // Don't warn inside macros. 
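// A typical case this routine flags when it does not originate in a macro
// (illustrative, not from the original source):
//
//   void check() {
//     int Local;
//     if (&Local == nullptr) { }  // the address of a local is never null
//   }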
13288 if (E->getExprLoc().isMacroID()) { 13289 const SourceManager &SM = getSourceManager(); 13290 if (IsInAnyMacroBody(SM, E->getExprLoc()) || 13291 IsInAnyMacroBody(SM, Range.getBegin())) 13292 return; 13293 } 13294 E = E->IgnoreImpCasts(); 13295 13296 const bool IsCompare = NullKind != Expr::NPCK_NotNull; 13297 13298 if (isa<CXXThisExpr>(E)) { 13299 unsigned DiagID = IsCompare ? diag::warn_this_null_compare 13300 : diag::warn_this_bool_conversion; 13301 Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; 13302 return; 13303 } 13304 13305 bool IsAddressOf = false; 13306 13307 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13308 if (UO->getOpcode() != UO_AddrOf) 13309 return; 13310 IsAddressOf = true; 13311 E = UO->getSubExpr(); 13312 } 13313 13314 if (IsAddressOf) { 13315 unsigned DiagID = IsCompare 13316 ? diag::warn_address_of_reference_null_compare 13317 : diag::warn_address_of_reference_bool_conversion; 13318 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range 13319 << IsEqual; 13320 if (CheckForReference(*this, E, PD)) { 13321 return; 13322 } 13323 } 13324 13325 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) { 13326 bool IsParam = isa<NonNullAttr>(NonnullAttr); 13327 std::string Str; 13328 llvm::raw_string_ostream S(Str); 13329 E->printPretty(S, nullptr, getPrintingPolicy()); 13330 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare 13331 : diag::warn_cast_nonnull_to_bool; 13332 Diag(E->getExprLoc(), DiagID) << IsParam << S.str() 13333 << E->getSourceRange() << Range << IsEqual; 13334 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; 13335 }; 13336 13337 // If we have a CallExpr that is tagged with returns_nonnull, we can complain. 13338 if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) { 13339 if (auto *Callee = Call->getDirectCallee()) { 13340 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) { 13341 ComplainAboutNonnullParamOrCall(A); 13342 return; 13343 } 13344 } 13345 } 13346 13347 // Expect to find a single Decl. Skip anything more complicated. 13348 ValueDecl *D = nullptr; 13349 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { 13350 D = R->getDecl(); 13351 } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { 13352 D = M->getMemberDecl(); 13353 } 13354 13355 // Weak Decls can be null. 13356 if (!D || D->isWeak()) 13357 return; 13358 13359 // Check for parameter decl with nonnull attribute 13360 if (const auto* PV = dyn_cast<ParmVarDecl>(D)) { 13361 if (getCurFunction() && 13362 !getCurFunction()->ModifiedNonNullParams.count(PV)) { 13363 if (const Attr *A = PV->getAttr<NonNullAttr>()) { 13364 ComplainAboutNonnullParamOrCall(A); 13365 return; 13366 } 13367 13368 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { 13369 // Skip function template not specialized yet. 
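// e.g. (illustrative, not from the original source):
//
//   __attribute__((nonnull)) void consume(int *P) {
//     if (!P) return;  // 'P' is declared nonnull, so this test is flagged
//   }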
13370 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 13371 return; 13372 auto ParamIter = llvm::find(FD->parameters(), PV); 13373 assert(ParamIter != FD->param_end()); 13374 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter); 13375 13376 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { 13377 if (!NonNull->args_size()) { 13378 ComplainAboutNonnullParamOrCall(NonNull); 13379 return; 13380 } 13381 13382 for (const ParamIdx &ArgNo : NonNull->args()) { 13383 if (ArgNo.getASTIndex() == ParamNo) { 13384 ComplainAboutNonnullParamOrCall(NonNull); 13385 return; 13386 } 13387 } 13388 } 13389 } 13390 } 13391 } 13392 13393 QualType T = D->getType(); 13394 const bool IsArray = T->isArrayType(); 13395 const bool IsFunction = T->isFunctionType(); 13396 13397 // Address of function is used to silence the function warning. 13398 if (IsAddressOf && IsFunction) { 13399 return; 13400 } 13401 13402 // Found nothing. 13403 if (!IsAddressOf && !IsFunction && !IsArray) 13404 return; 13405 13406 // Pretty print the expression for the diagnostic. 13407 std::string Str; 13408 llvm::raw_string_ostream S(Str); 13409 E->printPretty(S, nullptr, getPrintingPolicy()); 13410 13411 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare 13412 : diag::warn_impcast_pointer_to_bool; 13413 enum { 13414 AddressOf, 13415 FunctionPointer, 13416 ArrayPointer 13417 } DiagType; 13418 if (IsAddressOf) 13419 DiagType = AddressOf; 13420 else if (IsFunction) 13421 DiagType = FunctionPointer; 13422 else if (IsArray) 13423 DiagType = ArrayPointer; 13424 else 13425 llvm_unreachable("Could not determine diagnostic."); 13426 Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() 13427 << Range << IsEqual; 13428 13429 if (!IsFunction) 13430 return; 13431 13432 // Suggest '&' to silence the function warning. 13433 Diag(E->getExprLoc(), diag::note_function_warning_silence) 13434 << FixItHint::CreateInsertion(E->getBeginLoc(), "&"); 13435 13436 // Check to see if '()' fixit should be emitted. 13437 QualType ReturnType; 13438 UnresolvedSet<4> NonTemplateOverloads; 13439 tryExprAsCall(*E, ReturnType, NonTemplateOverloads); 13440 if (ReturnType.isNull()) 13441 return; 13442 13443 if (IsCompare) { 13444 // There are two cases here. If there is null constant, the only suggest 13445 // for a pointer return type. If the null is 0, then suggest if the return 13446 // type is a pointer or an integer type. 13447 if (!ReturnType->isPointerType()) { 13448 if (NullKind == Expr::NPCK_ZeroExpression || 13449 NullKind == Expr::NPCK_ZeroLiteral) { 13450 if (!ReturnType->isIntegerType()) 13451 return; 13452 } else { 13453 return; 13454 } 13455 } 13456 } else { // !IsCompare 13457 // For function to bool, only suggest if the function pointer has bool 13458 // return type. 13459 if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool)) 13460 return; 13461 } 13462 Diag(E->getExprLoc(), diag::note_function_to_function_call) 13463 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()"); 13464 } 13465 13466 /// Diagnoses "dangerous" implicit conversions within the given 13467 /// expression (which is a full expression). Implements -Wconversion 13468 /// and -Wsign-compare. 13469 /// 13470 /// \param CC the "context" location of the implicit conversion, i.e. 13471 /// the most location of the syntactic entity requiring the implicit 13472 /// conversion 13473 void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) { 13474 // Don't diagnose in unevaluated contexts. 
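// The function-pointer fix-its above target mistakes like this one
// (illustrative, not from the original source):
//
//   bool ready();
//   void spin() {
//     while (!ready) { }  // missing '()'; the function pointer is never null
//   }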
13475 if (isUnevaluatedContext()) 13476 return; 13477 13478 // Don't diagnose for value- or type-dependent expressions. 13479 if (E->isTypeDependent() || E->isValueDependent()) 13480 return; 13481 13482 // Check for array bounds violations in cases where the check isn't triggered 13483 // elsewhere for other Expr types (like BinaryOperators), e.g. when an 13484 // ArraySubscriptExpr is on the RHS of a variable initialization. 13485 CheckArrayAccess(E); 13486 13487 // This is not the right CC for (e.g.) a variable initialization. 13488 AnalyzeImplicitConversions(*this, E, CC); 13489 } 13490 13491 /// CheckBoolLikeConversion - Check conversion of given expression to boolean. 13492 /// Input argument E is a logical expression. 13493 void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) { 13494 ::CheckBoolLikeConversion(*this, E, CC); 13495 } 13496 13497 /// Diagnose when expression is an integer constant expression and its evaluation 13498 /// results in integer overflow 13499 void Sema::CheckForIntOverflow (Expr *E) { 13500 // Use a work list to deal with nested struct initializers. 13501 SmallVector<Expr *, 2> Exprs(1, E); 13502 13503 do { 13504 Expr *OriginalE = Exprs.pop_back_val(); 13505 Expr *E = OriginalE->IgnoreParenCasts(); 13506 13507 if (isa<BinaryOperator>(E)) { 13508 E->EvaluateForOverflow(Context); 13509 continue; 13510 } 13511 13512 if (auto InitList = dyn_cast<InitListExpr>(OriginalE)) 13513 Exprs.append(InitList->inits().begin(), InitList->inits().end()); 13514 else if (isa<ObjCBoxedExpr>(OriginalE)) 13515 E->EvaluateForOverflow(Context); 13516 else if (auto Call = dyn_cast<CallExpr>(E)) 13517 Exprs.append(Call->arg_begin(), Call->arg_end()); 13518 else if (auto Message = dyn_cast<ObjCMessageExpr>(E)) 13519 Exprs.append(Message->arg_begin(), Message->arg_end()); 13520 } while (!Exprs.empty()); 13521 } 13522 13523 namespace { 13524 13525 /// Visitor for expressions which looks for unsequenced operations on the 13526 /// same object. 13527 class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 13528 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 13529 13530 /// A tree of sequenced regions within an expression. Two regions are 13531 /// unsequenced if one is an ancestor or a descendent of the other. When we 13532 /// finish processing an expression with sequencing, such as a comma 13533 /// expression, we fold its tree nodes into its parent, since they are 13534 /// unsequenced with respect to nodes we will visit later. 13535 class SequenceTree { 13536 struct Value { 13537 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 13538 unsigned Parent : 31; 13539 unsigned Merged : 1; 13540 }; 13541 SmallVector<Value, 8> Values; 13542 13543 public: 13544 /// A region within an expression which may be sequenced with respect 13545 /// to some other region. 13546 class Seq { 13547 friend class SequenceTree; 13548 13549 unsigned Index; 13550 13551 explicit Seq(unsigned N) : Index(N) {} 13552 13553 public: 13554 Seq() : Index(0) {} 13555 }; 13556 13557 SequenceTree() { Values.push_back(Value(0)); } 13558 Seq root() const { return Seq(0); } 13559 13560 /// Create a new sequence of operations, which is an unsequenced 13561 /// subset of \p Parent. This sequence of operations is sequenced with 13562 /// respect to other children of \p Parent. 13563 Seq allocate(Seq Parent) { 13564 Values.push_back(Value(Parent.Index)); 13565 return Seq(Values.size() - 1); 13566 } 13567 13568 /// Merge a sequence of operations into its parent. 
13569 void merge(Seq S) { 13570 Values[S.Index].Merged = true; 13571 } 13572 13573 /// Determine whether two operations are unsequenced. This operation 13574 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 13575 /// should have been merged into its parent as appropriate. 13576 bool isUnsequenced(Seq Cur, Seq Old) { 13577 unsigned C = representative(Cur.Index); 13578 unsigned Target = representative(Old.Index); 13579 while (C >= Target) { 13580 if (C == Target) 13581 return true; 13582 C = Values[C].Parent; 13583 } 13584 return false; 13585 } 13586 13587 private: 13588 /// Pick a representative for a sequence. 13589 unsigned representative(unsigned K) { 13590 if (Values[K].Merged) 13591 // Perform path compression as we go. 13592 return Values[K].Parent = representative(Values[K].Parent); 13593 return K; 13594 } 13595 }; 13596 13597 /// An object for which we can track unsequenced uses. 13598 using Object = const NamedDecl *; 13599 13600 /// Different flavors of object usage which we track. We only track the 13601 /// least-sequenced usage of each kind. 13602 enum UsageKind { 13603 /// A read of an object. Multiple unsequenced reads are OK. 13604 UK_Use, 13605 13606 /// A modification of an object which is sequenced before the value 13607 /// computation of the expression, such as ++n in C++. 13608 UK_ModAsValue, 13609 13610 /// A modification of an object which is not sequenced before the value 13611 /// computation of the expression, such as n++. 13612 UK_ModAsSideEffect, 13613 13614 UK_Count = UK_ModAsSideEffect + 1 13615 }; 13616 13617 /// Bundle together a sequencing region and the expression corresponding 13618 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 13619 struct Usage { 13620 const Expr *UsageExpr; 13621 SequenceTree::Seq Seq; 13622 13623 Usage() : UsageExpr(nullptr), Seq() {} 13624 }; 13625 13626 struct UsageInfo { 13627 Usage Uses[UK_Count]; 13628 13629 /// Have we issued a diagnostic for this object already? 13630 bool Diagnosed; 13631 13632 UsageInfo() : Uses(), Diagnosed(false) {} 13633 }; 13634 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>; 13635 13636 Sema &SemaRef; 13637 13638 /// Sequenced regions within the expression. 13639 SequenceTree Tree; 13640 13641 /// Declaration modifications and references which we have seen. 13642 UsageInfoMap UsageMap; 13643 13644 /// The region we are currently within. 13645 SequenceTree::Seq Region; 13646 13647 /// Filled in with declarations which were modified as a side-effect 13648 /// (that is, post-increment operations). 13649 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr; 13650 13651 /// Expressions to check later. We defer checking these to reduce 13652 /// stack usage. 13653 SmallVectorImpl<const Expr *> &WorkList; 13654 13655 /// RAII object wrapping the visitation of a sequenced subexpression of an 13656 /// expression. At the end of this process, the side-effects of the evaluation 13657 /// become sequenced with respect to the value computation of the result, so 13658 /// we downgrade any UK_ModAsSideEffect within the evaluation to 13659 /// UK_ModAsValue. 
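///
/// A sketch of the intent (hypothetical user code):
/// \code
///   int k = 0;
///   int a = (k++, k);   // Fine: the comma sequences the side effect of
///                       // "k++" before the read of "k".
///   int b = k++ + k;    // -Wunsequenced: "+" imposes no such sequencing.
/// \endcode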
13660 struct SequencedSubexpression { 13661 SequencedSubexpression(SequenceChecker &Self) 13662 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { 13663 Self.ModAsSideEffect = &ModAsSideEffect; 13664 } 13665 13666 ~SequencedSubexpression() { 13667 for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) { 13668 // Add a new usage with usage kind UK_ModAsValue, and then restore 13669 // the previous usage with UK_ModAsSideEffect (thus clearing it if 13670 // the previous one was empty). 13671 UsageInfo &UI = Self.UsageMap[M.first]; 13672 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect]; 13673 Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue); 13674 SideEffectUsage = M.second; 13675 } 13676 Self.ModAsSideEffect = OldModAsSideEffect; 13677 } 13678 13679 SequenceChecker &Self; 13680 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; 13681 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect; 13682 }; 13683 13684 /// RAII object wrapping the visitation of a subexpression which we might 13685 /// choose to evaluate as a constant. If any subexpression is evaluated and 13686 /// found to be non-constant, this allows us to suppress the evaluation of 13687 /// the outer expression. 13688 class EvaluationTracker { 13689 public: 13690 EvaluationTracker(SequenceChecker &Self) 13691 : Self(Self), Prev(Self.EvalTracker) { 13692 Self.EvalTracker = this; 13693 } 13694 13695 ~EvaluationTracker() { 13696 Self.EvalTracker = Prev; 13697 if (Prev) 13698 Prev->EvalOK &= EvalOK; 13699 } 13700 13701 bool evaluate(const Expr *E, bool &Result) { 13702 if (!EvalOK || E->isValueDependent()) 13703 return false; 13704 EvalOK = E->EvaluateAsBooleanCondition( 13705 Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated()); 13706 return EvalOK; 13707 } 13708 13709 private: 13710 SequenceChecker &Self; 13711 EvaluationTracker *Prev; 13712 bool EvalOK = true; 13713 } *EvalTracker = nullptr; 13714 13715 /// Find the object which is produced by the specified expression, 13716 /// if any. 13717 Object getObject(const Expr *E, bool Mod) const { 13718 E = E->IgnoreParenCasts(); 13719 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 13720 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) 13721 return getObject(UO->getSubExpr(), Mod); 13722 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { 13723 if (BO->getOpcode() == BO_Comma) 13724 return getObject(BO->getRHS(), Mod); 13725 if (Mod && BO->isAssignmentOp()) 13726 return getObject(BO->getLHS(), Mod); 13727 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 13728 // FIXME: Check for more interesting cases, like "x.n = ++x.n". 13729 if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) 13730 return ME->getMemberDecl(); 13731 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) 13732 // FIXME: If this is a reference, map through to its value. 13733 return DRE->getDecl(); 13734 return nullptr; 13735 } 13736 13737 /// Note that an object \p O was modified or used by an expression 13738 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for 13739 /// the object \p O as obtained via the \p UsageMap. 13740 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) { 13741 // Get the old usage for the given object and usage kind. 
13742 Usage &U = UI.Uses[UK];
13743 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
13744 // If we have a modification as side effect and are in a sequenced
13745 // subexpression, save the old Usage so that we can restore it later
13746 // in SequencedSubexpression::~SequencedSubexpression.
13747 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
13748 ModAsSideEffect->push_back(std::make_pair(O, U));
13749 // Then record the new usage with the current sequencing region.
13750 U.UsageExpr = UsageExpr;
13751 U.Seq = Region;
13752 }
13753 }
13754
13755 /// Check whether a modification or use of an object \p O in an expression
13756 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
13757 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
13758 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
13759 /// usage and false when we are checking for a mod-use unsequenced usage.
13760 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
13761 UsageKind OtherKind, bool IsModMod) {
13762 if (UI.Diagnosed)
13763 return;
13764
13765 const Usage &U = UI.Uses[OtherKind];
13766 if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
13767 return;
13768
13769 const Expr *Mod = U.UsageExpr;
13770 const Expr *ModOrUse = UsageExpr;
13771 if (OtherKind == UK_Use)
13772 std::swap(Mod, ModOrUse);
13773
13774 SemaRef.DiagRuntimeBehavior(
13775 Mod->getExprLoc(), {Mod, ModOrUse},
13776 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
13777 : diag::warn_unsequenced_mod_use)
13778 << O << SourceRange(ModOrUse->getExprLoc()));
13779 UI.Diagnosed = true;
13780 }
13781
13782 // A note on note{Pre, Post}{Use, Mod}:
13783 //
13784 // (It helps to follow the algorithm with an expression such as
13785 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
13786 // operations before C++17 and both are well-defined in C++17).
13787 //
13788 // When visiting a node which uses/modifies an object, we first call notePreUse
13789 // or notePreMod before visiting its sub-expression(s). At this point the
13790 // children of the current node have not yet been visited and so the eventual
13791 // uses/modifications resulting from the children of the current node have not
13792 // been recorded yet.
13793 //
13794 // We then visit the children of the current node. After that notePostUse or
13795 // notePostMod is called. These will 1) detect an unsequenced modification
13796 // as side effect (as in "k++ + k") and 2) add a new usage with the
13797 // appropriate usage kind.
13798 //
13799 // We also have to be careful that some operations sequence modifications as
13800 // side effects as well (for example: || or ,). To account for this we wrap
13801 // the visitation of such a sub-expression (for example: the LHS of || or ,)
13802 // with SequencedSubexpression. SequencedSubexpression is an RAII object
13803 // which records usages which are modifications as side effects, and then
13804 // downgrades them (or more accurately restores the previous usage which was a
13805 // modification as side effect) when exiting the scope of the sequenced
13806 // subexpression.
13807
13808 void notePreUse(Object O, const Expr *UseExpr) {
13809 UsageInfo &UI = UsageMap[O];
13810 // Uses conflict with other modifications.
13811 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false); 13812 } 13813 13814 void notePostUse(Object O, const Expr *UseExpr) { 13815 UsageInfo &UI = UsageMap[O]; 13816 checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect, 13817 /*IsModMod=*/false); 13818 addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use); 13819 } 13820 13821 void notePreMod(Object O, const Expr *ModExpr) { 13822 UsageInfo &UI = UsageMap[O]; 13823 // Modifications conflict with other modifications and with uses. 13824 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true); 13825 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false); 13826 } 13827 13828 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) { 13829 UsageInfo &UI = UsageMap[O]; 13830 checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect, 13831 /*IsModMod=*/true); 13832 addUsage(O, UI, ModExpr, /*UsageKind=*/UK); 13833 } 13834 13835 public: 13836 SequenceChecker(Sema &S, const Expr *E, 13837 SmallVectorImpl<const Expr *> &WorkList) 13838 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) { 13839 Visit(E); 13840 // Silence a -Wunused-private-field since WorkList is now unused. 13841 // TODO: Evaluate if it can be used, and if not remove it. 13842 (void)this->WorkList; 13843 } 13844 13845 void VisitStmt(const Stmt *S) { 13846 // Skip all statements which aren't expressions for now. 13847 } 13848 13849 void VisitExpr(const Expr *E) { 13850 // By default, just recurse to evaluated subexpressions. 13851 Base::VisitStmt(E); 13852 } 13853 13854 void VisitCastExpr(const CastExpr *E) { 13855 Object O = Object(); 13856 if (E->getCastKind() == CK_LValueToRValue) 13857 O = getObject(E->getSubExpr(), false); 13858 13859 if (O) 13860 notePreUse(O, E); 13861 VisitExpr(E); 13862 if (O) 13863 notePostUse(O, E); 13864 } 13865 13866 void VisitSequencedExpressions(const Expr *SequencedBefore, 13867 const Expr *SequencedAfter) { 13868 SequenceTree::Seq BeforeRegion = Tree.allocate(Region); 13869 SequenceTree::Seq AfterRegion = Tree.allocate(Region); 13870 SequenceTree::Seq OldRegion = Region; 13871 13872 { 13873 SequencedSubexpression SeqBefore(*this); 13874 Region = BeforeRegion; 13875 Visit(SequencedBefore); 13876 } 13877 13878 Region = AfterRegion; 13879 Visit(SequencedAfter); 13880 13881 Region = OldRegion; 13882 13883 Tree.merge(BeforeRegion); 13884 Tree.merge(AfterRegion); 13885 } 13886 13887 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) { 13888 // C++17 [expr.sub]p1: 13889 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The 13890 // expression E1 is sequenced before the expression E2. 13891 if (SemaRef.getLangOpts().CPlusPlus17) 13892 VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS()); 13893 else { 13894 Visit(ASE->getLHS()); 13895 Visit(ASE->getRHS()); 13896 } 13897 } 13898 13899 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13900 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); } 13901 void VisitBinPtrMem(const BinaryOperator *BO) { 13902 // C++17 [expr.mptr.oper]p4: 13903 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...] 13904 // the expression E1 is sequenced before the expression E2. 
13905 if (SemaRef.getLangOpts().CPlusPlus17) 13906 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13907 else { 13908 Visit(BO->getLHS()); 13909 Visit(BO->getRHS()); 13910 } 13911 } 13912 13913 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13914 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); } 13915 void VisitBinShlShr(const BinaryOperator *BO) { 13916 // C++17 [expr.shift]p4: 13917 // The expression E1 is sequenced before the expression E2. 13918 if (SemaRef.getLangOpts().CPlusPlus17) 13919 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13920 else { 13921 Visit(BO->getLHS()); 13922 Visit(BO->getRHS()); 13923 } 13924 } 13925 13926 void VisitBinComma(const BinaryOperator *BO) { 13927 // C++11 [expr.comma]p1: 13928 // Every value computation and side effect associated with the left 13929 // expression is sequenced before every value computation and side 13930 // effect associated with the right expression. 13931 VisitSequencedExpressions(BO->getLHS(), BO->getRHS()); 13932 } 13933 13934 void VisitBinAssign(const BinaryOperator *BO) { 13935 SequenceTree::Seq RHSRegion; 13936 SequenceTree::Seq LHSRegion; 13937 if (SemaRef.getLangOpts().CPlusPlus17) { 13938 RHSRegion = Tree.allocate(Region); 13939 LHSRegion = Tree.allocate(Region); 13940 } else { 13941 RHSRegion = Region; 13942 LHSRegion = Region; 13943 } 13944 SequenceTree::Seq OldRegion = Region; 13945 13946 // C++11 [expr.ass]p1: 13947 // [...] the assignment is sequenced after the value computation 13948 // of the right and left operands, [...] 13949 // 13950 // so check it before inspecting the operands and update the 13951 // map afterwards. 13952 Object O = getObject(BO->getLHS(), /*Mod=*/true); 13953 if (O) 13954 notePreMod(O, BO); 13955 13956 if (SemaRef.getLangOpts().CPlusPlus17) { 13957 // C++17 [expr.ass]p1: 13958 // [...] The right operand is sequenced before the left operand. [...] 13959 { 13960 SequencedSubexpression SeqBefore(*this); 13961 Region = RHSRegion; 13962 Visit(BO->getRHS()); 13963 } 13964 13965 Region = LHSRegion; 13966 Visit(BO->getLHS()); 13967 13968 if (O && isa<CompoundAssignOperator>(BO)) 13969 notePostUse(O, BO); 13970 13971 } else { 13972 // C++11 does not specify any sequencing between the LHS and RHS. 13973 Region = LHSRegion; 13974 Visit(BO->getLHS()); 13975 13976 if (O && isa<CompoundAssignOperator>(BO)) 13977 notePostUse(O, BO); 13978 13979 Region = RHSRegion; 13980 Visit(BO->getRHS()); 13981 } 13982 13983 // C++11 [expr.ass]p1: 13984 // the assignment is sequenced [...] before the value computation of the 13985 // assignment expression. 13986 // C11 6.5.16/3 has no such rule. 13987 Region = OldRegion; 13988 if (O) 13989 notePostMod(O, BO, 13990 SemaRef.getLangOpts().CPlusPlus ? 
UK_ModAsValue 13991 : UK_ModAsSideEffect); 13992 if (SemaRef.getLangOpts().CPlusPlus17) { 13993 Tree.merge(RHSRegion); 13994 Tree.merge(LHSRegion); 13995 } 13996 } 13997 13998 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) { 13999 VisitBinAssign(CAO); 14000 } 14001 14002 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14003 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } 14004 void VisitUnaryPreIncDec(const UnaryOperator *UO) { 14005 Object O = getObject(UO->getSubExpr(), true); 14006 if (!O) 14007 return VisitExpr(UO); 14008 14009 notePreMod(O, UO); 14010 Visit(UO->getSubExpr()); 14011 // C++11 [expr.pre.incr]p1: 14012 // the expression ++x is equivalent to x+=1 14013 notePostMod(O, UO, 14014 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue 14015 : UK_ModAsSideEffect); 14016 } 14017 14018 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14019 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); } 14020 void VisitUnaryPostIncDec(const UnaryOperator *UO) { 14021 Object O = getObject(UO->getSubExpr(), true); 14022 if (!O) 14023 return VisitExpr(UO); 14024 14025 notePreMod(O, UO); 14026 Visit(UO->getSubExpr()); 14027 notePostMod(O, UO, UK_ModAsSideEffect); 14028 } 14029 14030 void VisitBinLOr(const BinaryOperator *BO) { 14031 // C++11 [expr.log.or]p2: 14032 // If the second expression is evaluated, every value computation and 14033 // side effect associated with the first expression is sequenced before 14034 // every value computation and side effect associated with the 14035 // second expression. 14036 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14037 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14038 SequenceTree::Seq OldRegion = Region; 14039 14040 EvaluationTracker Eval(*this); 14041 { 14042 SequencedSubexpression Sequenced(*this); 14043 Region = LHSRegion; 14044 Visit(BO->getLHS()); 14045 } 14046 14047 // C++11 [expr.log.or]p1: 14048 // [...] the second operand is not evaluated if the first operand 14049 // evaluates to true. 14050 bool EvalResult = false; 14051 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult); 14052 bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult); 14053 if (ShouldVisitRHS) { 14054 Region = RHSRegion; 14055 Visit(BO->getRHS()); 14056 } 14057 14058 Region = OldRegion; 14059 Tree.merge(LHSRegion); 14060 Tree.merge(RHSRegion); 14061 } 14062 14063 void VisitBinLAnd(const BinaryOperator *BO) { 14064 // C++11 [expr.log.and]p2: 14065 // If the second expression is evaluated, every value computation and 14066 // side effect associated with the first expression is sequenced before 14067 // every value computation and side effect associated with the 14068 // second expression. 14069 SequenceTree::Seq LHSRegion = Tree.allocate(Region); 14070 SequenceTree::Seq RHSRegion = Tree.allocate(Region); 14071 SequenceTree::Seq OldRegion = Region; 14072 14073 EvaluationTracker Eval(*this); 14074 { 14075 SequencedSubexpression Sequenced(*this); 14076 Region = LHSRegion; 14077 Visit(BO->getLHS()); 14078 } 14079 14080 // C++11 [expr.log.and]p1: 14081 // [...] the second operand is not evaluated if the first operand is false. 
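// For illustration (hypothetical user code): when the left operand folds to a
// constant false the right operand can never execute, so we skip visiting it
// rather than diagnose code that is never evaluated, e.g.
// \code
//   enum { Enabled = 0 };
//   int i = 0;
//   int r = Enabled && (i = i++);   // RHS is skipped here; "i = i++" on its
//                                   // own would be diagnosed before C++17.
// \endcode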
14082 bool EvalResult = false;
14083 bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
14084 bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
14085 if (ShouldVisitRHS) {
14086 Region = RHSRegion;
14087 Visit(BO->getRHS());
14088 }
14089
14090 Region = OldRegion;
14091 Tree.merge(LHSRegion);
14092 Tree.merge(RHSRegion);
14093 }
14094
14095 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
14096 // C++11 [expr.cond]p1:
14097 // [...] Every value computation and side effect associated with the first
14098 // expression is sequenced before every value computation and side effect
14099 // associated with the second or third expression.
14100 SequenceTree::Seq ConditionRegion = Tree.allocate(Region);
14101
14102 // No sequencing is specified between the true and false expression.
14103 // However, since exactly one of the two is going to be evaluated, we can
14104 // consider them to be sequenced. This is needed to avoid warning on
14105 // something like "x ? y += 1 : y += 2;" in the case where we will visit
14106 // both the true and false expressions because we can't evaluate x.
14107 // This will still allow us to detect an expression like (pre C++17)
14108 // "(x ? y += 1 : y += 2) = y".
14109 //
14110 // We don't wrap the visitation of the true and false expressions with
14111 // SequencedSubexpression because we don't want to downgrade modifications
14112 // as side effect in the true and false expressions after the visitation
14113 // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
14114 // not warn between the two "y++", but we should warn between the "y++"
14115 // and the "y".)
14116 SequenceTree::Seq TrueRegion = Tree.allocate(Region);
14117 SequenceTree::Seq FalseRegion = Tree.allocate(Region);
14118 SequenceTree::Seq OldRegion = Region;
14119
14120 EvaluationTracker Eval(*this);
14121 {
14122 SequencedSubexpression Sequenced(*this);
14123 Region = ConditionRegion;
14124 Visit(CO->getCond());
14125 }
14126
14127 // C++11 [expr.cond]p1:
14128 // [...] The first expression is contextually converted to bool (Clause 4).
14129 // It is evaluated and if it is true, the result of the conditional
14130 // expression is the value of the second expression, otherwise that of the
14131 // third expression. Only one of the second and third expressions is
14132 // evaluated. [...]
14133 bool EvalResult = false;
14134 bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
14135 bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
14136 bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
14137 if (ShouldVisitTrueExpr) {
14138 Region = TrueRegion;
14139 Visit(CO->getTrueExpr());
14140 }
14141 if (ShouldVisitFalseExpr) {
14142 Region = FalseRegion;
14143 Visit(CO->getFalseExpr());
14144 }
14145
14146 Region = OldRegion;
14147 Tree.merge(ConditionRegion);
14148 Tree.merge(TrueRegion);
14149 Tree.merge(FalseRegion);
14150 }
14151
14152 void VisitCallExpr(const CallExpr *CE) {
14153 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
14154
14155 if (CE->isUnevaluatedBuiltinCall(Context))
14156 return;
14157
14158 // C++11 [intro.execution]p15:
14159 // When calling a function [...], every value computation and side effect
14160 // associated with any argument expression, or with the postfix expression
14161 // designating the called function, is sequenced before execution of every
14162 // expression or statement in the body of the function [and thus before
14163 // the value computation of its result].
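// For illustration (hypothetical user code): argument expressions are not
// sequenced with respect to one another, e.g.
// \code
//   void f(int, int);
//   int i = 0;
//   f(i++, i++);   // -Wunsequenced before C++17: two unsequenced
//                  // modifications of 'i' in the argument list.
// \endcode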
14164 SequencedSubexpression Sequenced(*this); 14165 SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] { 14166 // C++17 [expr.call]p5 14167 // The postfix-expression is sequenced before each expression in the 14168 // expression-list and any default argument. [...] 14169 SequenceTree::Seq CalleeRegion; 14170 SequenceTree::Seq OtherRegion; 14171 if (SemaRef.getLangOpts().CPlusPlus17) { 14172 CalleeRegion = Tree.allocate(Region); 14173 OtherRegion = Tree.allocate(Region); 14174 } else { 14175 CalleeRegion = Region; 14176 OtherRegion = Region; 14177 } 14178 SequenceTree::Seq OldRegion = Region; 14179 14180 // Visit the callee expression first. 14181 Region = CalleeRegion; 14182 if (SemaRef.getLangOpts().CPlusPlus17) { 14183 SequencedSubexpression Sequenced(*this); 14184 Visit(CE->getCallee()); 14185 } else { 14186 Visit(CE->getCallee()); 14187 } 14188 14189 // Then visit the argument expressions. 14190 Region = OtherRegion; 14191 for (const Expr *Argument : CE->arguments()) 14192 Visit(Argument); 14193 14194 Region = OldRegion; 14195 if (SemaRef.getLangOpts().CPlusPlus17) { 14196 Tree.merge(CalleeRegion); 14197 Tree.merge(OtherRegion); 14198 } 14199 }); 14200 } 14201 14202 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) { 14203 // C++17 [over.match.oper]p2: 14204 // [...] the operator notation is first transformed to the equivalent 14205 // function-call notation as summarized in Table 12 (where @ denotes one 14206 // of the operators covered in the specified subclause). However, the 14207 // operands are sequenced in the order prescribed for the built-in 14208 // operator (Clause 8). 14209 // 14210 // From the above only overloaded binary operators and overloaded call 14211 // operators have sequencing rules in C++17 that we need to handle 14212 // separately. 14213 if (!SemaRef.getLangOpts().CPlusPlus17 || 14214 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call)) 14215 return VisitCallExpr(CXXOCE); 14216 14217 enum { 14218 NoSequencing, 14219 LHSBeforeRHS, 14220 RHSBeforeLHS, 14221 LHSBeforeRest 14222 } SequencingKind; 14223 switch (CXXOCE->getOperator()) { 14224 case OO_Equal: 14225 case OO_PlusEqual: 14226 case OO_MinusEqual: 14227 case OO_StarEqual: 14228 case OO_SlashEqual: 14229 case OO_PercentEqual: 14230 case OO_CaretEqual: 14231 case OO_AmpEqual: 14232 case OO_PipeEqual: 14233 case OO_LessLessEqual: 14234 case OO_GreaterGreaterEqual: 14235 SequencingKind = RHSBeforeLHS; 14236 break; 14237 14238 case OO_LessLess: 14239 case OO_GreaterGreater: 14240 case OO_AmpAmp: 14241 case OO_PipePipe: 14242 case OO_Comma: 14243 case OO_ArrowStar: 14244 case OO_Subscript: 14245 SequencingKind = LHSBeforeRHS; 14246 break; 14247 14248 case OO_Call: 14249 SequencingKind = LHSBeforeRest; 14250 break; 14251 14252 default: 14253 SequencingKind = NoSequencing; 14254 break; 14255 } 14256 14257 if (SequencingKind == NoSequencing) 14258 return VisitCallExpr(CXXOCE); 14259 14260 // This is a call, so all subexpressions are sequenced before the result. 
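// For illustration (hypothetical user code): in C++17 the operands of an
// overloaded binary operator follow the sequencing of the built-in operator,
// e.g.
// \code
//   int i = 0;
//   std::cout << i << i++;   // Well-defined in C++17: for "<<" the left
//                            // operand is sequenced before the right one.
// \endcode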
14261 SequencedSubexpression Sequenced(*this); 14262 14263 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] { 14264 assert(SemaRef.getLangOpts().CPlusPlus17 && 14265 "Should only get there with C++17 and above!"); 14266 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) && 14267 "Should only get there with an overloaded binary operator" 14268 " or an overloaded call operator!"); 14269 14270 if (SequencingKind == LHSBeforeRest) { 14271 assert(CXXOCE->getOperator() == OO_Call && 14272 "We should only have an overloaded call operator here!"); 14273 14274 // This is very similar to VisitCallExpr, except that we only have the 14275 // C++17 case. The postfix-expression is the first argument of the 14276 // CXXOperatorCallExpr. The expressions in the expression-list, if any, 14277 // are in the following arguments. 14278 // 14279 // Note that we intentionally do not visit the callee expression since 14280 // it is just a decayed reference to a function. 14281 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region); 14282 SequenceTree::Seq ArgsRegion = Tree.allocate(Region); 14283 SequenceTree::Seq OldRegion = Region; 14284 14285 assert(CXXOCE->getNumArgs() >= 1 && 14286 "An overloaded call operator must have at least one argument" 14287 " for the postfix-expression!"); 14288 const Expr *PostfixExpr = CXXOCE->getArgs()[0]; 14289 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1, 14290 CXXOCE->getNumArgs() - 1); 14291 14292 // Visit the postfix-expression first. 14293 { 14294 Region = PostfixExprRegion; 14295 SequencedSubexpression Sequenced(*this); 14296 Visit(PostfixExpr); 14297 } 14298 14299 // Then visit the argument expressions. 14300 Region = ArgsRegion; 14301 for (const Expr *Arg : Args) 14302 Visit(Arg); 14303 14304 Region = OldRegion; 14305 Tree.merge(PostfixExprRegion); 14306 Tree.merge(ArgsRegion); 14307 } else { 14308 assert(CXXOCE->getNumArgs() == 2 && 14309 "Should only have two arguments here!"); 14310 assert((SequencingKind == LHSBeforeRHS || 14311 SequencingKind == RHSBeforeLHS) && 14312 "Unexpected sequencing kind!"); 14313 14314 // We do not visit the callee expression since it is just a decayed 14315 // reference to a function. 14316 const Expr *E1 = CXXOCE->getArg(0); 14317 const Expr *E2 = CXXOCE->getArg(1); 14318 if (SequencingKind == RHSBeforeLHS) 14319 std::swap(E1, E2); 14320 14321 return VisitSequencedExpressions(E1, E2); 14322 } 14323 }); 14324 } 14325 14326 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) { 14327 // This is a call, so all subexpressions are sequenced before the result. 14328 SequencedSubexpression Sequenced(*this); 14329 14330 if (!CCE->isListInitialization()) 14331 return VisitExpr(CCE); 14332 14333 // In C++11, list initializations are sequenced. 14334 SmallVector<SequenceTree::Seq, 32> Elts; 14335 SequenceTree::Seq Parent = Region; 14336 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(), 14337 E = CCE->arg_end(); 14338 I != E; ++I) { 14339 Region = Tree.allocate(Parent); 14340 Elts.push_back(Region); 14341 Visit(*I); 14342 } 14343 14344 // Forget that the initializers are sequenced. 14345 Region = Parent; 14346 for (unsigned I = 0; I < Elts.size(); ++I) 14347 Tree.merge(Elts[I]); 14348 } 14349 14350 void VisitInitListExpr(const InitListExpr *ILE) { 14351 if (!SemaRef.getLangOpts().CPlusPlus11) 14352 return VisitExpr(ILE); 14353 14354 // In C++11, list initializations are sequenced. 
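// For illustration (hypothetical user code): each initializer-clause is
// sequenced before the initializer-clauses that follow it, e.g.
// \code
//   int i = 0;
//   int a[2] = {i++, i++};   // OK in C++11: no -Wunsequenced.
// \endcode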
14355 SmallVector<SequenceTree::Seq, 32> Elts; 14356 SequenceTree::Seq Parent = Region; 14357 for (unsigned I = 0; I < ILE->getNumInits(); ++I) { 14358 const Expr *E = ILE->getInit(I); 14359 if (!E) 14360 continue; 14361 Region = Tree.allocate(Parent); 14362 Elts.push_back(Region); 14363 Visit(E); 14364 } 14365 14366 // Forget that the initializers are sequenced. 14367 Region = Parent; 14368 for (unsigned I = 0; I < Elts.size(); ++I) 14369 Tree.merge(Elts[I]); 14370 } 14371 }; 14372 14373 } // namespace 14374 14375 void Sema::CheckUnsequencedOperations(const Expr *E) { 14376 SmallVector<const Expr *, 8> WorkList; 14377 WorkList.push_back(E); 14378 while (!WorkList.empty()) { 14379 const Expr *Item = WorkList.pop_back_val(); 14380 SequenceChecker(*this, Item, WorkList); 14381 } 14382 } 14383 14384 void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, 14385 bool IsConstexpr) { 14386 llvm::SaveAndRestore<bool> ConstantContext( 14387 isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E)); 14388 CheckImplicitConversions(E, CheckLoc); 14389 if (!E->isInstantiationDependent()) 14390 CheckUnsequencedOperations(E); 14391 if (!IsConstexpr && !E->isValueDependent()) 14392 CheckForIntOverflow(E); 14393 DiagnoseMisalignedMembers(); 14394 } 14395 14396 void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, 14397 FieldDecl *BitField, 14398 Expr *Init) { 14399 (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); 14400 } 14401 14402 static void diagnoseArrayStarInParamType(Sema &S, QualType PType, 14403 SourceLocation Loc) { 14404 if (!PType->isVariablyModifiedType()) 14405 return; 14406 if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { 14407 diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); 14408 return; 14409 } 14410 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { 14411 diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); 14412 return; 14413 } 14414 if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { 14415 diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); 14416 return; 14417 } 14418 14419 const ArrayType *AT = S.Context.getAsArrayType(PType); 14420 if (!AT) 14421 return; 14422 14423 if (AT->getSizeModifier() != ArrayType::Star) { 14424 diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); 14425 return; 14426 } 14427 14428 S.Diag(Loc, diag::err_array_star_in_function_definition); 14429 } 14430 14431 /// CheckParmsForFunctionDef - Check that the parameters of the given 14432 /// function are appropriate for the definition of a function. This 14433 /// takes care of any checks that cannot be performed on the 14434 /// declaration itself, e.g., that the types of each of the function 14435 /// parameters are complete. 14436 bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, 14437 bool CheckParameterNames) { 14438 bool HasInvalidParm = false; 14439 for (ParmVarDecl *Param : Parameters) { 14440 // C99 6.7.5.3p4: the parameters in a parameter type list in a 14441 // function declarator that is part of a function definition of 14442 // that function shall not have incomplete type. 14443 // 14444 // This is also C++ [dcl.fct]p6. 
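// For illustration (hypothetical user code):
// \code
//   struct Incomplete;
//   void declared(struct Incomplete p);    // OK: declaration only.
//   void defined(struct Incomplete p) {}   // error: incomplete parameter
//                                          // type in a definition.
// \endcode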
14445 if (!Param->isInvalidDecl() && 14446 RequireCompleteType(Param->getLocation(), Param->getType(), 14447 diag::err_typecheck_decl_incomplete_type)) { 14448 Param->setInvalidDecl(); 14449 HasInvalidParm = true; 14450 } 14451 14452 // C99 6.9.1p5: If the declarator includes a parameter type list, the 14453 // declaration of each parameter shall include an identifier. 14454 if (CheckParameterNames && Param->getIdentifier() == nullptr && 14455 !Param->isImplicit() && !getLangOpts().CPlusPlus) { 14456 // Diagnose this as an extension in C17 and earlier. 14457 if (!getLangOpts().C2x) 14458 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x); 14459 } 14460 14461 // C99 6.7.5.3p12: 14462 // If the function declarator is not part of a definition of that 14463 // function, parameters may have incomplete type and may use the [*] 14464 // notation in their sequences of declarator specifiers to specify 14465 // variable length array types. 14466 QualType PType = Param->getOriginalType(); 14467 // FIXME: This diagnostic should point the '[*]' if source-location 14468 // information is added for it. 14469 diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); 14470 14471 // If the parameter is a c++ class type and it has to be destructed in the 14472 // callee function, declare the destructor so that it can be called by the 14473 // callee function. Do not perform any direct access check on the dtor here. 14474 if (!Param->isInvalidDecl()) { 14475 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) { 14476 if (!ClassDecl->isInvalidDecl() && 14477 !ClassDecl->hasIrrelevantDestructor() && 14478 !ClassDecl->isDependentContext() && 14479 ClassDecl->isParamDestroyedInCallee()) { 14480 CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); 14481 MarkFunctionReferenced(Param->getLocation(), Destructor); 14482 DiagnoseUseOfDecl(Destructor, Param->getLocation()); 14483 } 14484 } 14485 } 14486 14487 // Parameters with the pass_object_size attribute only need to be marked 14488 // constant at function definitions. Because we lack information about 14489 // whether we're on a declaration or definition when we're instantiating the 14490 // attribute, we need to check for constness here. 14491 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>()) 14492 if (!Param->getType().isConstQualified()) 14493 Diag(Param->getLocation(), diag::err_attribute_pointers_only) 14494 << Attr->getSpelling() << 1; 14495 14496 // Check for parameter names shadowing fields from the class. 14497 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) { 14498 // The owning context for the parameter should be the function, but we 14499 // want to see if this function's declaration context is a record. 14500 DeclContext *DC = Param->getDeclContext(); 14501 if (DC && DC->isFunctionOrMethod()) { 14502 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent())) 14503 CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(), 14504 RD, /*DeclIsField*/ false); 14505 } 14506 } 14507 } 14508 14509 return HasInvalidParm; 14510 } 14511 14512 Optional<std::pair<CharUnits, CharUnits>> 14513 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); 14514 14515 /// Compute the alignment and offset of the base class object given the 14516 /// derived-to-base cast expression and the alignment and offset of the derived 14517 /// class object. 
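///
/// For example (a sketch):
/// \code
///   struct B { int b; };
///   struct D : B { int d; };
///   void f(D *p) { B *q = p; }   // derived-to-base conversion.
/// \endcode
/// Here the alignment computed for '*p' is kept and the offset of the B
/// subobject within D is added. If B were a virtual base, the alignment would
/// instead be clamped to B's non-virtual alignment and the offset reset to
/// zero.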
14518 static std::pair<CharUnits, CharUnits> 14519 getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType, 14520 CharUnits BaseAlignment, CharUnits Offset, 14521 ASTContext &Ctx) { 14522 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE; 14523 ++PathI) { 14524 const CXXBaseSpecifier *Base = *PathI; 14525 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl(); 14526 if (Base->isVirtual()) { 14527 // The complete object may have a lower alignment than the non-virtual 14528 // alignment of the base, in which case the base may be misaligned. Choose 14529 // the smaller of the non-virtual alignment and BaseAlignment, which is a 14530 // conservative lower bound of the complete object alignment. 14531 CharUnits NonVirtualAlignment = 14532 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 14533 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 14534 Offset = CharUnits::Zero(); 14535 } else { 14536 const ASTRecordLayout &RL = 14537 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 14538 Offset += RL.getBaseClassOffset(BaseDecl); 14539 } 14540 DerivedType = Base->getType(); 14541 } 14542 14543 return std::make_pair(BaseAlignment, Offset); 14544 } 14545 14546 /// Compute the alignment and offset of a binary additive operator. 14547 static Optional<std::pair<CharUnits, CharUnits>> 14548 getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 14549 bool IsSub, ASTContext &Ctx) { 14550 QualType PointeeType = PtrE->getType()->getPointeeType(); 14551 14552 if (!PointeeType->isConstantSizeType()) 14553 return llvm::None; 14554 14555 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 14556 14557 if (!P) 14558 return llvm::None; 14559 14560 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 14561 if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 14562 CharUnits Offset = EltSize * IdxRes->getExtValue(); 14563 if (IsSub) 14564 Offset = -Offset; 14565 return std::make_pair(P->first, P->second + Offset); 14566 } 14567 14568 // If the integer expression isn't a constant expression, compute the lower 14569 // bound of the alignment using the alignment and offset of the pointer 14570 // expression and the element size. 14571 return std::make_pair( 14572 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 14573 CharUnits::Zero()); 14574 } 14575 14576 /// This helper function takes an lvalue expression and returns the alignment of 14577 /// a VarDecl and a constant offset from the VarDecl. 
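///
/// For example (a sketch):
/// \code
///   struct S { char c; int buf[4]; } s;
///   int *p = &s.buf[2];   // alignment of 's', offset of 'buf' within S
///                         // plus 2 * sizeof(int).
/// \endcode
/// Expressions this function does not understand yield None.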
14578 Optional<std::pair<CharUnits, CharUnits>> 14579 static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { 14580 E = E->IgnoreParens(); 14581 switch (E->getStmtClass()) { 14582 default: 14583 break; 14584 case Stmt::CStyleCastExprClass: 14585 case Stmt::CXXStaticCastExprClass: 14586 case Stmt::ImplicitCastExprClass: { 14587 auto *CE = cast<CastExpr>(E); 14588 const Expr *From = CE->getSubExpr(); 14589 switch (CE->getCastKind()) { 14590 default: 14591 break; 14592 case CK_NoOp: 14593 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14594 case CK_UncheckedDerivedToBase: 14595 case CK_DerivedToBase: { 14596 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14597 if (!P) 14598 break; 14599 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 14600 P->second, Ctx); 14601 } 14602 } 14603 break; 14604 } 14605 case Stmt::ArraySubscriptExprClass: { 14606 auto *ASE = cast<ArraySubscriptExpr>(E); 14607 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 14608 false, Ctx); 14609 } 14610 case Stmt::DeclRefExprClass: { 14611 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 14612 // FIXME: If VD is captured by copy or is an escaping __block variable, 14613 // use the alignment of VD's type. 14614 if (!VD->getType()->isReferenceType()) 14615 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 14616 if (VD->hasInit()) 14617 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 14618 } 14619 break; 14620 } 14621 case Stmt::MemberExprClass: { 14622 auto *ME = cast<MemberExpr>(E); 14623 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 14624 if (!FD || FD->getType()->isReferenceType() || 14625 FD->getParent()->isInvalidDecl()) 14626 break; 14627 Optional<std::pair<CharUnits, CharUnits>> P; 14628 if (ME->isArrow()) 14629 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 14630 else 14631 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 14632 if (!P) 14633 break; 14634 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 14635 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 14636 return std::make_pair(P->first, 14637 P->second + CharUnits::fromQuantity(Offset)); 14638 } 14639 case Stmt::UnaryOperatorClass: { 14640 auto *UO = cast<UnaryOperator>(E); 14641 switch (UO->getOpcode()) { 14642 default: 14643 break; 14644 case UO_Deref: 14645 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 14646 } 14647 break; 14648 } 14649 case Stmt::BinaryOperatorClass: { 14650 auto *BO = cast<BinaryOperator>(E); 14651 auto Opcode = BO->getOpcode(); 14652 switch (Opcode) { 14653 default: 14654 break; 14655 case BO_Comma: 14656 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 14657 } 14658 break; 14659 } 14660 } 14661 return llvm::None; 14662 } 14663 14664 /// This helper function takes a pointer expression and returns the alignment of 14665 /// a VarDecl and a constant offset from the VarDecl. 
14666 Optional<std::pair<CharUnits, CharUnits>> 14667 static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { 14668 E = E->IgnoreParens(); 14669 switch (E->getStmtClass()) { 14670 default: 14671 break; 14672 case Stmt::CStyleCastExprClass: 14673 case Stmt::CXXStaticCastExprClass: 14674 case Stmt::ImplicitCastExprClass: { 14675 auto *CE = cast<CastExpr>(E); 14676 const Expr *From = CE->getSubExpr(); 14677 switch (CE->getCastKind()) { 14678 default: 14679 break; 14680 case CK_NoOp: 14681 return getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14682 case CK_ArrayToPointerDecay: 14683 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 14684 case CK_UncheckedDerivedToBase: 14685 case CK_DerivedToBase: { 14686 auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx); 14687 if (!P) 14688 break; 14689 return getDerivedToBaseAlignmentAndOffset( 14690 CE, From->getType()->getPointeeType(), P->first, P->second, Ctx); 14691 } 14692 } 14693 break; 14694 } 14695 case Stmt::CXXThisExprClass: { 14696 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl(); 14697 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment(); 14698 return std::make_pair(Alignment, CharUnits::Zero()); 14699 } 14700 case Stmt::UnaryOperatorClass: { 14701 auto *UO = cast<UnaryOperator>(E); 14702 if (UO->getOpcode() == UO_AddrOf) 14703 return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx); 14704 break; 14705 } 14706 case Stmt::BinaryOperatorClass: { 14707 auto *BO = cast<BinaryOperator>(E); 14708 auto Opcode = BO->getOpcode(); 14709 switch (Opcode) { 14710 default: 14711 break; 14712 case BO_Add: 14713 case BO_Sub: { 14714 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS(); 14715 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType()) 14716 std::swap(LHS, RHS); 14717 return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub, 14718 Ctx); 14719 } 14720 case BO_Comma: 14721 return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx); 14722 } 14723 break; 14724 } 14725 } 14726 return llvm::None; 14727 } 14728 14729 static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) { 14730 // See if we can compute the alignment of a VarDecl and an offset from it. 14731 Optional<std::pair<CharUnits, CharUnits>> P = 14732 getBaseAlignmentAndOffsetFromPtr(E, S.Context); 14733 14734 if (P) 14735 return P->first.alignmentAtOffset(P->second); 14736 14737 // If that failed, return the type's alignment. 14738 return S.Context.getTypeAlignInChars(E->getType()->getPointeeType()); 14739 } 14740 14741 /// CheckCastAlign - Implements -Wcast-align, which warns when a 14742 /// pointer cast increases the alignment requirements. 14743 void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { 14744 // This is actually a lot of work to potentially be doing on every 14745 // cast; don't do it if we're ignoring -Wcast_align (as is the default). 14746 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) 14747 return; 14748 14749 // Ignore dependent types. 14750 if (T->isDependentType() || Op->getType()->isDependentType()) 14751 return; 14752 14753 // Require that the destination be a pointer type. 14754 const PointerType *DestPtr = T->getAs<PointerType>(); 14755 if (!DestPtr) return; 14756 14757 // If the destination has alignment 1, we're done. 
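// For illustration (hypothetical user code; alignment values are
// target-dependent):
// \code
//   void use(char *p) {
//     char *a = (char *)p;   // Never diagnosed: destination alignment is 1.
//     int *b = (int *)p;     // -Wcast-align: the presumed alignment of 'p'
//                            // (1) is smaller than the alignment of 'int'.
//   }
// \endcode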
14758 QualType DestPointee = DestPtr->getPointeeType(); 14759 if (DestPointee->isIncompleteType()) return; 14760 CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); 14761 if (DestAlign.isOne()) return; 14762 14763 // Require that the source be a pointer type. 14764 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); 14765 if (!SrcPtr) return; 14766 QualType SrcPointee = SrcPtr->getPointeeType(); 14767 14768 // Explicitly allow casts from cv void*. We already implicitly 14769 // allowed casts to cv void*, since they have alignment 1. 14770 // Also allow casts involving incomplete types, which implicitly 14771 // includes 'void'. 14772 if (SrcPointee->isIncompleteType()) return; 14773 14774 CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); 14775 14776 if (SrcAlign >= DestAlign) return; 14777 14778 Diag(TRange.getBegin(), diag::warn_cast_align) 14779 << Op->getType() << T 14780 << static_cast<unsigned>(SrcAlign.getQuantity()) 14781 << static_cast<unsigned>(DestAlign.getQuantity()) 14782 << TRange << Op->getSourceRange(); 14783 } 14784 14785 /// Check whether this array fits the idiom of a size-one tail padded 14786 /// array member of a struct. 14787 /// 14788 /// We avoid emitting out-of-bounds access warnings for such arrays as they are 14789 /// commonly used to emulate flexible arrays in C89 code. 14790 static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size, 14791 const NamedDecl *ND) { 14792 if (Size != 1 || !ND) return false; 14793 14794 const FieldDecl *FD = dyn_cast<FieldDecl>(ND); 14795 if (!FD) return false; 14796 14797 // Don't consider sizes resulting from macro expansions or template argument 14798 // substitution to form C89 tail-padded arrays. 14799 14800 TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); 14801 while (TInfo) { 14802 TypeLoc TL = TInfo->getTypeLoc(); 14803 // Look through typedefs. 14804 if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { 14805 const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); 14806 TInfo = TDL->getTypeSourceInfo(); 14807 continue; 14808 } 14809 if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { 14810 const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); 14811 if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) 14812 return false; 14813 } 14814 break; 14815 } 14816 14817 const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); 14818 if (!RD) return false; 14819 if (RD->isUnion()) return false; 14820 if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 14821 if (!CRD->isStandardLayout()) return false; 14822 } 14823 14824 // See if this is the last field decl in the record. 14825 const Decl *D = FD; 14826 while ((D = D->getNextDeclInContext())) 14827 if (isa<FieldDecl>(D)) 14828 return false; 14829 return true; 14830 } 14831 14832 void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, 14833 const ArraySubscriptExpr *ASE, 14834 bool AllowOnePastEnd, bool IndexNegated) { 14835 // Already diagnosed by the constant evaluator. 14836 if (isConstantEvaluated()) 14837 return; 14838 14839 IndexExpr = IndexExpr->IgnoreParenImpCasts(); 14840 if (IndexExpr->isValueDependent()) 14841 return; 14842 14843 const Type *EffectiveType = 14844 BaseExpr->getType()->getPointeeOrArrayElementType(); 14845 BaseExpr = BaseExpr->IgnoreParenCasts(); 14846 const ConstantArrayType *ArrayTy = 14847 Context.getAsConstantArrayType(BaseExpr->getType()); 14848 14849 const Type *BaseType = 14850 ArrayTy == nullptr ? 
nullptr : ArrayTy->getElementType().getTypePtr(); 14851 bool IsUnboundedArray = (BaseType == nullptr); 14852 if (EffectiveType->isDependentType() || 14853 (!IsUnboundedArray && BaseType->isDependentType())) 14854 return; 14855 14856 Expr::EvalResult Result; 14857 if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects)) 14858 return; 14859 14860 llvm::APSInt index = Result.Val.getInt(); 14861 if (IndexNegated) { 14862 index.setIsUnsigned(false); 14863 index = -index; 14864 } 14865 14866 const NamedDecl *ND = nullptr; 14867 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14868 ND = DRE->getDecl(); 14869 if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) 14870 ND = ME->getMemberDecl(); 14871 14872 if (IsUnboundedArray) { 14873 if (index.isUnsigned() || !index.isNegative()) { 14874 const auto &ASTC = getASTContext(); 14875 unsigned AddrBits = 14876 ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace( 14877 EffectiveType->getCanonicalTypeInternal())); 14878 if (index.getBitWidth() < AddrBits) 14879 index = index.zext(AddrBits); 14880 Optional<CharUnits> ElemCharUnits = 14881 ASTC.getTypeSizeInCharsIfKnown(EffectiveType); 14882 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void 14883 // pointer) bounds-checking isn't meaningful. 14884 if (!ElemCharUnits) 14885 return; 14886 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity()); 14887 // If index has more active bits than address space, we already know 14888 // we have a bounds violation to warn about. Otherwise, compute 14889 // address of (index + 1)th element, and warn about bounds violation 14890 // only if that address exceeds address space. 14891 if (index.getActiveBits() <= AddrBits) { 14892 bool Overflow; 14893 llvm::APInt Product(index); 14894 Product += 1; 14895 Product = Product.umul_ov(ElemBytes, Overflow); 14896 if (!Overflow && Product.getActiveBits() <= AddrBits) 14897 return; 14898 } 14899 14900 // Need to compute max possible elements in address space, since that 14901 // is included in diag message. 14902 llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits); 14903 MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth())); 14904 MaxElems += 1; 14905 ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth()); 14906 MaxElems = MaxElems.udiv(ElemBytes); 14907 14908 unsigned DiagID = 14909 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds 14910 : diag::warn_ptr_arith_exceeds_max_addressable_bounds; 14911 14912 // Diag message shows element size in bits and in "bytes" (platform- 14913 // dependent CharUnits) 14914 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 14915 PDiag(DiagID) 14916 << toString(index, 10, true) << AddrBits 14917 << (unsigned)ASTC.toBits(*ElemCharUnits) 14918 << toString(ElemBytes, 10, false) 14919 << toString(MaxElems, 10, false) 14920 << (unsigned)MaxElems.getLimitedValue(~0U) 14921 << IndexExpr->getSourceRange()); 14922 14923 if (!ND) { 14924 // Try harder to find a NamedDecl to point at in the note. 
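// For example (a sketch): for an access such as 'buf[0][Idx]' the immediate
// base is itself an ArraySubscriptExpr, so strip subscripts and then look
// through a DeclRefExpr or MemberExpr to find a declaration to attach the
// "declared here" note to.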
14925 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 14926 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 14927 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 14928 ND = DRE->getDecl(); 14929 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 14930 ND = ME->getMemberDecl(); 14931 } 14932 14933 if (ND) 14934 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 14935 PDiag(diag::note_array_declared_here) << ND); 14936 } 14937 return; 14938 } 14939 14940 if (index.isUnsigned() || !index.isNegative()) { 14941 // It is possible that the type of the base expression after 14942 // IgnoreParenCasts is incomplete, even though the type of the base 14943 // expression before IgnoreParenCasts is complete (see PR39746 for an 14944 // example). In this case we have no information about whether the array 14945 // access exceeds the array bounds. However we can still diagnose an array 14946 // access which precedes the array bounds. 14947 if (BaseType->isIncompleteType()) 14948 return; 14949 14950 llvm::APInt size = ArrayTy->getSize(); 14951 if (!size.isStrictlyPositive()) 14952 return; 14953 14954 if (BaseType != EffectiveType) { 14955 // Make sure we're comparing apples to apples when comparing index to size 14956 uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); 14957 uint64_t array_typesize = Context.getTypeSize(BaseType); 14958 // Handle ptrarith_typesize being zero, such as when casting to void* 14959 if (!ptrarith_typesize) ptrarith_typesize = 1; 14960 if (ptrarith_typesize != array_typesize) { 14961 // There's a cast to a different size type involved 14962 uint64_t ratio = array_typesize / ptrarith_typesize; 14963 // TODO: Be smarter about handling cases where array_typesize is not a 14964 // multiple of ptrarith_typesize 14965 if (ptrarith_typesize * ratio == array_typesize) 14966 size *= llvm::APInt(size.getBitWidth(), ratio); 14967 } 14968 } 14969 14970 if (size.getBitWidth() > index.getBitWidth()) 14971 index = index.zext(size.getBitWidth()); 14972 else if (size.getBitWidth() < index.getBitWidth()) 14973 size = size.zext(index.getBitWidth()); 14974 14975 // For array subscripting the index must be less than size, but for pointer 14976 // arithmetic also allow the index (offset) to be equal to size since 14977 // computing the next address after the end of the array is legal and 14978 // commonly done e.g. in C++ iterators and range-based for loops. 14979 if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) 14980 return; 14981 14982 // Also don't warn for arrays of size 1 which are members of some 14983 // structure. These are often used to approximate flexible arrays in C89 14984 // code. 14985 if (IsTailPaddedMemberArray(*this, size, ND)) 14986 return; 14987 14988 // Suppress the warning if the subscript expression (as identified by the 14989 // ']' location) and the index expression are both from macro expansions 14990 // within a system header. 14991 if (ASE) { 14992 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( 14993 ASE->getRBracketLoc()); 14994 if (SourceMgr.isInSystemHeader(RBracketLoc)) { 14995 SourceLocation IndexLoc = 14996 SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); 14997 if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) 14998 return; 14999 } 15000 } 15001 15002 unsigned DiagID = ASE ? 
diag::warn_array_index_exceeds_bounds 15003 : diag::warn_ptr_arith_exceeds_bounds; 15004 15005 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15006 PDiag(DiagID) << toString(index, 10, true) 15007 << toString(size, 10, true) 15008 << (unsigned)size.getLimitedValue(~0U) 15009 << IndexExpr->getSourceRange()); 15010 } else { 15011 unsigned DiagID = diag::warn_array_index_precedes_bounds; 15012 if (!ASE) { 15013 DiagID = diag::warn_ptr_arith_precedes_bounds; 15014 if (index.isNegative()) index = -index; 15015 } 15016 15017 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, 15018 PDiag(DiagID) << toString(index, 10, true) 15019 << IndexExpr->getSourceRange()); 15020 } 15021 15022 if (!ND) { 15023 // Try harder to find a NamedDecl to point at in the note. 15024 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) 15025 BaseExpr = ASE->getBase()->IgnoreParenCasts(); 15026 if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) 15027 ND = DRE->getDecl(); 15028 if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr)) 15029 ND = ME->getMemberDecl(); 15030 } 15031 15032 if (ND) 15033 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr, 15034 PDiag(diag::note_array_declared_here) << ND); 15035 } 15036 15037 void Sema::CheckArrayAccess(const Expr *expr) { 15038 int AllowOnePastEnd = 0; 15039 while (expr) { 15040 expr = expr->IgnoreParenImpCasts(); 15041 switch (expr->getStmtClass()) { 15042 case Stmt::ArraySubscriptExprClass: { 15043 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); 15044 CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, 15045 AllowOnePastEnd > 0); 15046 expr = ASE->getBase(); 15047 break; 15048 } 15049 case Stmt::MemberExprClass: { 15050 expr = cast<MemberExpr>(expr)->getBase(); 15051 break; 15052 } 15053 case Stmt::OMPArraySectionExprClass: { 15054 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr); 15055 if (ASE->getLowerBound()) 15056 CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), 15057 /*ASE=*/nullptr, AllowOnePastEnd > 0); 15058 return; 15059 } 15060 case Stmt::UnaryOperatorClass: { 15061 // Only unwrap the * and & unary operators 15062 const UnaryOperator *UO = cast<UnaryOperator>(expr); 15063 expr = UO->getSubExpr(); 15064 switch (UO->getOpcode()) { 15065 case UO_AddrOf: 15066 AllowOnePastEnd++; 15067 break; 15068 case UO_Deref: 15069 AllowOnePastEnd--; 15070 break; 15071 default: 15072 return; 15073 } 15074 break; 15075 } 15076 case Stmt::ConditionalOperatorClass: { 15077 const ConditionalOperator *cond = cast<ConditionalOperator>(expr); 15078 if (const Expr *lhs = cond->getLHS()) 15079 CheckArrayAccess(lhs); 15080 if (const Expr *rhs = cond->getRHS()) 15081 CheckArrayAccess(rhs); 15082 return; 15083 } 15084 case Stmt::CXXOperatorCallExprClass: { 15085 const auto *OCE = cast<CXXOperatorCallExpr>(expr); 15086 for (const auto *Arg : OCE->arguments()) 15087 CheckArrayAccess(Arg); 15088 return; 15089 } 15090 default: 15091 return; 15092 } 15093 } 15094 } 15095 15096 //===--- CHECK: Objective-C retain cycles ----------------------------------// 15097 15098 namespace { 15099 15100 struct RetainCycleOwner { 15101 VarDecl *Variable = nullptr; 15102 SourceRange Range; 15103 SourceLocation Loc; 15104 bool Indirect = false; 15105 15106 RetainCycleOwner() = default; 15107 15108 void setLocsFrom(Expr *e) { 15109 Loc = e->getExprLoc(); 15110 Range = e->getSourceRange(); 15111 } 15112 }; 15113 15114 } // namespace 15115 15116 /// Consider whether capturing the given variable can possibly lead to 15117 /// a retain cycle. 
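/// For example (illustrative): under ARC a local `MyController *strongSelf = self;`
/// has __strong lifetime, so a block that `strongSelf` ends up owning and that also
/// references `strongSelf` can keep both alive; a __weak variable never qualifies,
/// which is why only OCL_Strong lifetime is accepted below.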
15118 static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { 15119 // In ARC, it's captured strongly iff the variable has __strong 15120 // lifetime. In MRR, it's captured strongly if the variable is 15121 // __block and has an appropriate type. 15122 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15123 return false; 15124 15125 owner.Variable = var; 15126 if (ref) 15127 owner.setLocsFrom(ref); 15128 return true; 15129 } 15130 15131 static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { 15132 while (true) { 15133 e = e->IgnoreParens(); 15134 if (CastExpr *cast = dyn_cast<CastExpr>(e)) { 15135 switch (cast->getCastKind()) { 15136 case CK_BitCast: 15137 case CK_LValueBitCast: 15138 case CK_LValueToRValue: 15139 case CK_ARCReclaimReturnedObject: 15140 e = cast->getSubExpr(); 15141 continue; 15142 15143 default: 15144 return false; 15145 } 15146 } 15147 15148 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { 15149 ObjCIvarDecl *ivar = ref->getDecl(); 15150 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) 15151 return false; 15152 15153 // Try to find a retain cycle in the base. 15154 if (!findRetainCycleOwner(S, ref->getBase(), owner)) 15155 return false; 15156 15157 if (ref->isFreeIvar()) owner.setLocsFrom(ref); 15158 owner.Indirect = true; 15159 return true; 15160 } 15161 15162 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { 15163 VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); 15164 if (!var) return false; 15165 return considerVariable(var, ref, owner); 15166 } 15167 15168 if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { 15169 if (member->isArrow()) return false; 15170 15171 // Don't count this as an indirect ownership. 15172 e = member->getBase(); 15173 continue; 15174 } 15175 15176 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 15177 // Only pay attention to pseudo-objects on property references. 15178 ObjCPropertyRefExpr *pre 15179 = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() 15180 ->IgnoreParens()); 15181 if (!pre) return false; 15182 if (pre->isImplicitProperty()) return false; 15183 ObjCPropertyDecl *property = pre->getExplicitProperty(); 15184 if (!property->isRetaining() && 15185 !(property->getPropertyIvarDecl() && 15186 property->getPropertyIvarDecl()->getType() 15187 .getObjCLifetime() == Qualifiers::OCL_Strong)) 15188 return false; 15189 15190 owner.Indirect = true; 15191 if (pre->isSuperReceiver()) { 15192 owner.Variable = S.getCurMethodDecl()->getSelfDecl(); 15193 if (!owner.Variable) 15194 return false; 15195 owner.Loc = pre->getLocation(); 15196 owner.Range = pre->getSourceRange(); 15197 return true; 15198 } 15199 e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) 15200 ->getSourceExpr()); 15201 continue; 15202 } 15203 15204 // Array ivars? 
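// (e.g. an object reachable only through a C array ivar such as `_handlers[0]`
// is not modeled here; we conservatively report no owner, so no warning fires.)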
15205 15206 return false; 15207 } 15208 } 15209 15210 namespace { 15211 15212 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { 15213 ASTContext &Context; 15214 VarDecl *Variable; 15215 Expr *Capturer = nullptr; 15216 bool VarWillBeReased = false; 15217 15218 FindCaptureVisitor(ASTContext &Context, VarDecl *variable) 15219 : EvaluatedExprVisitor<FindCaptureVisitor>(Context), 15220 Context(Context), Variable(variable) {} 15221 15222 void VisitDeclRefExpr(DeclRefExpr *ref) { 15223 if (ref->getDecl() == Variable && !Capturer) 15224 Capturer = ref; 15225 } 15226 15227 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { 15228 if (Capturer) return; 15229 Visit(ref->getBase()); 15230 if (Capturer && ref->isFreeIvar()) 15231 Capturer = ref; 15232 } 15233 15234 void VisitBlockExpr(BlockExpr *block) { 15235 // Look inside nested blocks 15236 if (block->getBlockDecl()->capturesVariable(Variable)) 15237 Visit(block->getBlockDecl()->getBody()); 15238 } 15239 15240 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { 15241 if (Capturer) return; 15242 if (OVE->getSourceExpr()) 15243 Visit(OVE->getSourceExpr()); 15244 } 15245 15246 void VisitBinaryOperator(BinaryOperator *BinOp) { 15247 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) 15248 return; 15249 Expr *LHS = BinOp->getLHS(); 15250 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { 15251 if (DRE->getDecl() != Variable) 15252 return; 15253 if (Expr *RHS = BinOp->getRHS()) { 15254 RHS = RHS->IgnoreParenCasts(); 15255 Optional<llvm::APSInt> Value; 15256 VarWillBeReased = 15257 (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && 15258 *Value == 0); 15259 } 15260 } 15261 } 15262 }; 15263 15264 } // namespace 15265 15266 /// Check whether the given argument is a block which captures a 15267 /// variable. 15268 static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { 15269 assert(owner.Variable && owner.Loc.isValid()); 15270 15271 e = e->IgnoreParenCasts(); 15272 15273 // Look through [^{...} copy] and Block_copy(^{...}). 15274 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { 15275 Selector Cmd = ME->getSelector(); 15276 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { 15277 e = ME->getInstanceReceiver(); 15278 if (!e) 15279 return nullptr; 15280 e = e->IgnoreParenCasts(); 15281 } 15282 } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { 15283 if (CE->getNumArgs() == 1) { 15284 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); 15285 if (Fn) { 15286 const IdentifierInfo *FnI = Fn->getIdentifier(); 15287 if (FnI && FnI->isStr("_Block_copy")) { 15288 e = CE->getArg(0)->IgnoreParenCasts(); 15289 } 15290 } 15291 } 15292 } 15293 15294 BlockExpr *block = dyn_cast<BlockExpr>(e); 15295 if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) 15296 return nullptr; 15297 15298 FindCaptureVisitor visitor(S.Context, owner.Variable); 15299 visitor.Visit(block->getBlockDecl()->getBody()); 15300 return visitor.VarWillBeReased ? 
nullptr : visitor.Capturer; 15301 } 15302 15303 static void diagnoseRetainCycle(Sema &S, Expr *capturer, 15304 RetainCycleOwner &owner) { 15305 assert(capturer); 15306 assert(owner.Variable && owner.Loc.isValid()); 15307 15308 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) 15309 << owner.Variable << capturer->getSourceRange(); 15310 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) 15311 << owner.Indirect << owner.Range; 15312 } 15313 15314 /// Check for a keyword selector that starts with the word 'add' or 15315 /// 'set'. 15316 static bool isSetterLikeSelector(Selector sel) { 15317 if (sel.isUnarySelector()) return false; 15318 15319 StringRef str = sel.getNameForSlot(0); 15320 while (!str.empty() && str.front() == '_') str = str.substr(1); 15321 if (str.startswith("set")) 15322 str = str.substr(3); 15323 else if (str.startswith("add")) { 15324 // Specially allow 'addOperationWithBlock:'. 15325 if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) 15326 return false; 15327 str = str.substr(3); 15328 } 15329 else 15330 return false; 15331 15332 if (str.empty()) return true; 15333 return !isLowercase(str.front()); 15334 } 15335 15336 static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, 15337 ObjCMessageExpr *Message) { 15338 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( 15339 Message->getReceiverInterface(), 15340 NSAPI::ClassId_NSMutableArray); 15341 if (!IsMutableArray) { 15342 return None; 15343 } 15344 15345 Selector Sel = Message->getSelector(); 15346 15347 Optional<NSAPI::NSArrayMethodKind> MKOpt = 15348 S.NSAPIObj->getNSArrayMethodKind(Sel); 15349 if (!MKOpt) { 15350 return None; 15351 } 15352 15353 NSAPI::NSArrayMethodKind MK = *MKOpt; 15354 15355 switch (MK) { 15356 case NSAPI::NSMutableArr_addObject: 15357 case NSAPI::NSMutableArr_insertObjectAtIndex: 15358 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: 15359 return 0; 15360 case NSAPI::NSMutableArr_replaceObjectAtIndex: 15361 return 1; 15362 15363 default: 15364 return None; 15365 } 15366 15367 return None; 15368 } 15369 15370 static 15371 Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, 15372 ObjCMessageExpr *Message) { 15373 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( 15374 Message->getReceiverInterface(), 15375 NSAPI::ClassId_NSMutableDictionary); 15376 if (!IsMutableDictionary) { 15377 return None; 15378 } 15379 15380 Selector Sel = Message->getSelector(); 15381 15382 Optional<NSAPI::NSDictionaryMethodKind> MKOpt = 15383 S.NSAPIObj->getNSDictionaryMethodKind(Sel); 15384 if (!MKOpt) { 15385 return None; 15386 } 15387 15388 NSAPI::NSDictionaryMethodKind MK = *MKOpt; 15389 15390 switch (MK) { 15391 case NSAPI::NSMutableDict_setObjectForKey: 15392 case NSAPI::NSMutableDict_setValueForKey: 15393 case NSAPI::NSMutableDict_setObjectForKeyedSubscript: 15394 return 0; 15395 15396 default: 15397 return None; 15398 } 15399 15400 return None; 15401 } 15402 15403 static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { 15404 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( 15405 Message->getReceiverInterface(), 15406 NSAPI::ClassId_NSMutableSet); 15407 15408 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( 15409 Message->getReceiverInterface(), 15410 NSAPI::ClassId_NSMutableOrderedSet); 15411 if (!IsMutableSet && !IsMutableOrderedSet) { 15412 return None; 15413 } 15414 15415 Selector Sel = Message->getSelector(); 15416 15417 Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); 15418 if (!MKOpt) 
{ 15419 return None; 15420 } 15421 15422 NSAPI::NSSetMethodKind MK = *MKOpt; 15423 15424 switch (MK) { 15425 case NSAPI::NSMutableSet_addObject: 15426 case NSAPI::NSOrderedSet_setObjectAtIndex: 15427 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: 15428 case NSAPI::NSOrderedSet_insertObjectAtIndex: 15429 return 0; 15430 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: 15431 return 1; 15432 } 15433 15434 return None; 15435 } 15436 15437 void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { 15438 if (!Message->isInstanceMessage()) { 15439 return; 15440 } 15441 15442 Optional<int> ArgOpt; 15443 15444 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && 15445 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && 15446 !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { 15447 return; 15448 } 15449 15450 int ArgIndex = *ArgOpt; 15451 15452 Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); 15453 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { 15454 Arg = OE->getSourceExpr()->IgnoreImpCasts(); 15455 } 15456 15457 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { 15458 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15459 if (ArgRE->isObjCSelfExpr()) { 15460 Diag(Message->getSourceRange().getBegin(), 15461 diag::warn_objc_circular_container) 15462 << ArgRE->getDecl() << StringRef("'super'"); 15463 } 15464 } 15465 } else { 15466 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); 15467 15468 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { 15469 Receiver = OE->getSourceExpr()->IgnoreImpCasts(); 15470 } 15471 15472 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { 15473 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { 15474 if (ReceiverRE->getDecl() == ArgRE->getDecl()) { 15475 ValueDecl *Decl = ReceiverRE->getDecl(); 15476 Diag(Message->getSourceRange().getBegin(), 15477 diag::warn_objc_circular_container) 15478 << Decl << Decl; 15479 if (!ArgRE->isObjCSelfExpr()) { 15480 Diag(Decl->getLocation(), 15481 diag::note_objc_circular_container_declared_here) 15482 << Decl; 15483 } 15484 } 15485 } 15486 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { 15487 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { 15488 if (IvarRE->getDecl() == IvarArgRE->getDecl()) { 15489 ObjCIvarDecl *Decl = IvarRE->getDecl(); 15490 Diag(Message->getSourceRange().getBegin(), 15491 diag::warn_objc_circular_container) 15492 << Decl << Decl; 15493 Diag(Decl->getLocation(), 15494 diag::note_objc_circular_container_declared_here) 15495 << Decl; 15496 } 15497 } 15498 } 15499 } 15500 } 15501 15502 /// Check a message send to see if it's likely to cause a retain cycle. 15503 void Sema::checkRetainCycles(ObjCMessageExpr *msg) { 15504 // Only check instance methods whose selector looks like a setter. 15505 if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) 15506 return; 15507 15508 // Try to find a variable that the receiver is strongly owned by. 
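// For example (illustrative): in `[obj setCompletionHandler:^{ [obj fire]; }]`
// the owner is the strong variable behind `obj`, and the block argument
// captures that same variable.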
15509 RetainCycleOwner owner; 15510 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { 15511 if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) 15512 return; 15513 } else { 15514 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); 15515 owner.Variable = getCurMethodDecl()->getSelfDecl(); 15516 owner.Loc = msg->getSuperLoc(); 15517 owner.Range = msg->getSuperLoc(); 15518 } 15519 15520 // Check whether the receiver is captured by any of the arguments. 15521 const ObjCMethodDecl *MD = msg->getMethodDecl(); 15522 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) { 15523 if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) { 15524 // noescape blocks should not be retained by the method. 15525 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>()) 15526 continue; 15527 return diagnoseRetainCycle(*this, capturer, owner); 15528 } 15529 } 15530 } 15531 15532 /// Check a property assign to see if it's likely to cause a retain cycle. 15533 void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 15534 RetainCycleOwner owner; 15535 if (!findRetainCycleOwner(*this, receiver, owner)) 15536 return; 15537 15538 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 15539 diagnoseRetainCycle(*this, capturer, owner); 15540 } 15541 15542 void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 15543 RetainCycleOwner Owner; 15544 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 15545 return; 15546 15547 // Because we don't have an expression for the variable, we have to set the 15548 // location explicitly here. 15549 Owner.Loc = Var->getLocation(); 15550 Owner.Range = Var->getSourceRange(); 15551 15552 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 15553 diagnoseRetainCycle(*this, Capturer, Owner); 15554 } 15555 15556 static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 15557 Expr *RHS, bool isProperty) { 15558 // Check if RHS is an Objective-C object literal, which also can get 15559 // immediately zapped in a weak reference. Note that we explicitly 15560 // allow ObjCStringLiterals, since those are designed to never really die. 15561 RHS = RHS->IgnoreParenImpCasts(); 15562 15563 // This enum needs to match with the 'select' in 15564 // warn_objc_arc_literal_assign (off-by-1). 15565 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 15566 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 15567 return false; 15568 15569 S.Diag(Loc, diag::warn_arc_literal_assign) 15570 << (unsigned) Kind 15571 << (isProperty ? 0 : 1) 15572 << RHS->getSourceRange(); 15573 15574 return true; 15575 } 15576 15577 static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 15578 Qualifiers::ObjCLifetime LT, 15579 Expr *RHS, bool isProperty) { 15580 // Strip off any implicit cast added to get to the one ARC-specific. 15581 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15582 if (cast->getCastKind() == CK_ARCConsumeObject) { 15583 S.Diag(Loc, diag::warn_arc_retained_assign) 15584 << (LT == Qualifiers::OCL_ExplicitNone) 15585 << (isProperty ? 
0 : 1) 15586 << RHS->getSourceRange(); 15587 return true; 15588 } 15589 RHS = cast->getSubExpr(); 15590 } 15591 15592 if (LT == Qualifiers::OCL_Weak && 15593 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 15594 return true; 15595 15596 return false; 15597 } 15598 15599 bool Sema::checkUnsafeAssigns(SourceLocation Loc, 15600 QualType LHS, Expr *RHS) { 15601 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 15602 15603 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 15604 return false; 15605 15606 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 15607 return true; 15608 15609 return false; 15610 } 15611 15612 void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 15613 Expr *LHS, Expr *RHS) { 15614 QualType LHSType; 15615 // PropertyRef on LHS type need be directly obtained from 15616 // its declaration as it has a PseudoType. 15617 ObjCPropertyRefExpr *PRE 15618 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 15619 if (PRE && !PRE->isImplicitProperty()) { 15620 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15621 if (PD) 15622 LHSType = PD->getType(); 15623 } 15624 15625 if (LHSType.isNull()) 15626 LHSType = LHS->getType(); 15627 15628 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 15629 15630 if (LT == Qualifiers::OCL_Weak) { 15631 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 15632 getCurFunction()->markSafeWeakUse(LHS); 15633 } 15634 15635 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 15636 return; 15637 15638 // FIXME. Check for other life times. 15639 if (LT != Qualifiers::OCL_None) 15640 return; 15641 15642 if (PRE) { 15643 if (PRE->isImplicitProperty()) 15644 return; 15645 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 15646 if (!PD) 15647 return; 15648 15649 unsigned Attributes = PD->getPropertyAttributes(); 15650 if (Attributes & ObjCPropertyAttribute::kind_assign) { 15651 // when 'assign' attribute was not explicitly specified 15652 // by user, ignore it and rely on property type itself 15653 // for lifetime info. 15654 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 15655 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 15656 LHSType->isObjCRetainableType()) 15657 return; 15658 15659 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 15660 if (cast->getCastKind() == CK_ARCConsumeObject) { 15661 Diag(Loc, diag::warn_arc_retained_property_assign) 15662 << RHS->getSourceRange(); 15663 return; 15664 } 15665 RHS = cast->getSubExpr(); 15666 } 15667 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 15668 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 15669 return; 15670 } 15671 } 15672 } 15673 15674 //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 15675 15676 static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 15677 SourceLocation StmtLoc, 15678 const NullStmt *Body) { 15679 // Do not warn if the body is a macro that expands to nothing, e.g: 15680 // 15681 // #define CALL(x) 15682 // if (condition) 15683 // CALL(0); 15684 if (Body->hasLeadingEmptyMacro()) 15685 return false; 15686 15687 // Get line numbers of statement and body. 
15688 bool StmtLineInvalid; 15689 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 15690 &StmtLineInvalid); 15691 if (StmtLineInvalid) 15692 return false; 15693 15694 bool BodyLineInvalid; 15695 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 15696 &BodyLineInvalid); 15697 if (BodyLineInvalid) 15698 return false; 15699 15700 // Warn if null statement and body are on the same line. 15701 if (StmtLine != BodyLine) 15702 return false; 15703 15704 return true; 15705 } 15706 15707 void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 15708 const Stmt *Body, 15709 unsigned DiagID) { 15710 // Since this is a syntactic check, don't emit diagnostic for template 15711 // instantiations, this just adds noise. 15712 if (CurrentInstantiationScope) 15713 return; 15714 15715 // The body should be a null statement. 15716 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15717 if (!NBody) 15718 return; 15719 15720 // Do the usual checks. 15721 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15722 return; 15723 15724 Diag(NBody->getSemiLoc(), DiagID); 15725 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15726 } 15727 15728 void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 15729 const Stmt *PossibleBody) { 15730 assert(!CurrentInstantiationScope); // Ensured by caller 15731 15732 SourceLocation StmtLoc; 15733 const Stmt *Body; 15734 unsigned DiagID; 15735 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 15736 StmtLoc = FS->getRParenLoc(); 15737 Body = FS->getBody(); 15738 DiagID = diag::warn_empty_for_body; 15739 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 15740 StmtLoc = WS->getCond()->getSourceRange().getEnd(); 15741 Body = WS->getBody(); 15742 DiagID = diag::warn_empty_while_body; 15743 } else 15744 return; // Neither `for' nor `while'. 15745 15746 // The body should be a null statement. 15747 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 15748 if (!NBody) 15749 return; 15750 15751 // Skip expensive checks if diagnostic is disabled. 15752 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 15753 return; 15754 15755 // Do the usual checks. 15756 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 15757 return; 15758 15759 // `for(...);' and `while(...);' are popular idioms, so in order to keep 15760 // noise level low, emit diagnostics only if for/while is followed by a 15761 // CompoundStmt, e.g.: 15762 // for (int i = 0; i < n; i++); 15763 // { 15764 // a(i); 15765 // } 15766 // or if for/while is followed by a statement with more indentation 15767 // than for/while itself: 15768 // for (int i = 0; i < n; i++); 15769 // a(i); 15770 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 15771 if (!ProbableTypo) { 15772 bool BodyColInvalid; 15773 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 15774 PossibleBody->getBeginLoc(), &BodyColInvalid); 15775 if (BodyColInvalid) 15776 return; 15777 15778 bool StmtColInvalid; 15779 unsigned StmtCol = 15780 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 15781 if (StmtColInvalid) 15782 return; 15783 15784 if (BodyCol > StmtCol) 15785 ProbableTypo = true; 15786 } 15787 15788 if (ProbableTypo) { 15789 Diag(NBody->getSemiLoc(), DiagID); 15790 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 15791 } 15792 } 15793 15794 //===--- CHECK: Warn on self move with std::move. -------------------------===// 15795 15796 /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 
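/// A typical instance (illustrative):
///   std::vector<int> v = makeVector();
///   v = std::move(v);   // -Wself-move: explicitly moving variable to itself
/// Member chains such as `this->x = std::move(this->x)` are handled as well.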
15797 void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 15798 SourceLocation OpLoc) { 15799 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 15800 return; 15801 15802 if (inTemplateInstantiation()) 15803 return; 15804 15805 // Strip parens and casts away. 15806 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 15807 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 15808 15809 // Check for a call expression 15810 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 15811 if (!CE || CE->getNumArgs() != 1) 15812 return; 15813 15814 // Check for a call to std::move 15815 if (!CE->isCallToStdMove()) 15816 return; 15817 15818 // Get argument from std::move 15819 RHSExpr = CE->getArg(0); 15820 15821 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 15822 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 15823 15824 // Two DeclRefExpr's, check that the decls are the same. 15825 if (LHSDeclRef && RHSDeclRef) { 15826 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 15827 return; 15828 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 15829 RHSDeclRef->getDecl()->getCanonicalDecl()) 15830 return; 15831 15832 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15833 << LHSExpr->getSourceRange() 15834 << RHSExpr->getSourceRange(); 15835 return; 15836 } 15837 15838 // Member variables require a different approach to check for self moves. 15839 // MemberExpr's are the same if every nested MemberExpr refers to the same 15840 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 15841 // the base Expr's are CXXThisExpr's. 15842 const Expr *LHSBase = LHSExpr; 15843 const Expr *RHSBase = RHSExpr; 15844 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 15845 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 15846 if (!LHSME || !RHSME) 15847 return; 15848 15849 while (LHSME && RHSME) { 15850 if (LHSME->getMemberDecl()->getCanonicalDecl() != 15851 RHSME->getMemberDecl()->getCanonicalDecl()) 15852 return; 15853 15854 LHSBase = LHSME->getBase(); 15855 RHSBase = RHSME->getBase(); 15856 LHSME = dyn_cast<MemberExpr>(LHSBase); 15857 RHSME = dyn_cast<MemberExpr>(RHSBase); 15858 } 15859 15860 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 15861 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 15862 if (LHSDeclRef && RHSDeclRef) { 15863 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 15864 return; 15865 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 15866 RHSDeclRef->getDecl()->getCanonicalDecl()) 15867 return; 15868 15869 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15870 << LHSExpr->getSourceRange() 15871 << RHSExpr->getSourceRange(); 15872 return; 15873 } 15874 15875 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 15876 Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() 15877 << LHSExpr->getSourceRange() 15878 << RHSExpr->getSourceRange(); 15879 } 15880 15881 //===--- Layout compatibility ----------------------------------------------// 15882 15883 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 15884 15885 /// Check if two enumeration types are layout-compatible. 15886 static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 15887 // C++11 [dcl.enum] p8: 15888 // Two enumeration types are layout-compatible if they have the same 15889 // underlying type. 15890 return ED1->isComplete() && ED2->isComplete() && 15891 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 15892 } 15893 15894 /// Check if two fields are layout-compatible. 
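/// (Bit-fields must additionally have equal widths: e.g. `int a : 4;` and
/// `int b : 8;` are not layout-compatible fields under the check below.)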
15895 static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, 15896 FieldDecl *Field2) { 15897 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) 15898 return false; 15899 15900 if (Field1->isBitField() != Field2->isBitField()) 15901 return false; 15902 15903 if (Field1->isBitField()) { 15904 // Make sure that the bit-fields are the same length. 15905 unsigned Bits1 = Field1->getBitWidthValue(C); 15906 unsigned Bits2 = Field2->getBitWidthValue(C); 15907 15908 if (Bits1 != Bits2) 15909 return false; 15910 } 15911 15912 return true; 15913 } 15914 15915 /// Check if two standard-layout structs are layout-compatible. 15916 /// (C++11 [class.mem] p17) 15917 static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, 15918 RecordDecl *RD2) { 15919 // If both records are C++ classes, check that base classes match. 15920 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { 15921 // If one of records is a CXXRecordDecl we are in C++ mode, 15922 // thus the other one is a CXXRecordDecl, too. 15923 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); 15924 // Check number of base classes. 15925 if (D1CXX->getNumBases() != D2CXX->getNumBases()) 15926 return false; 15927 15928 // Check the base classes. 15929 for (CXXRecordDecl::base_class_const_iterator 15930 Base1 = D1CXX->bases_begin(), 15931 BaseEnd1 = D1CXX->bases_end(), 15932 Base2 = D2CXX->bases_begin(); 15933 Base1 != BaseEnd1; 15934 ++Base1, ++Base2) { 15935 if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) 15936 return false; 15937 } 15938 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { 15939 // If only RD2 is a C++ class, it should have zero base classes. 15940 if (D2CXX->getNumBases() > 0) 15941 return false; 15942 } 15943 15944 // Check the fields. 15945 RecordDecl::field_iterator Field2 = RD2->field_begin(), 15946 Field2End = RD2->field_end(), 15947 Field1 = RD1->field_begin(), 15948 Field1End = RD1->field_end(); 15949 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { 15950 if (!isLayoutCompatible(C, *Field1, *Field2)) 15951 return false; 15952 } 15953 if (Field1 != Field1End || Field2 != Field2End) 15954 return false; 15955 15956 return true; 15957 } 15958 15959 /// Check if two standard-layout unions are layout-compatible. 15960 /// (C++11 [class.mem] p18) 15961 static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, 15962 RecordDecl *RD2) { 15963 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; 15964 for (auto *Field2 : RD2->fields()) 15965 UnmatchedFields.insert(Field2); 15966 15967 for (auto *Field1 : RD1->fields()) { 15968 llvm::SmallPtrSet<FieldDecl *, 8>::iterator 15969 I = UnmatchedFields.begin(), 15970 E = UnmatchedFields.end(); 15971 15972 for ( ; I != E; ++I) { 15973 if (isLayoutCompatible(C, Field1, *I)) { 15974 bool Result = UnmatchedFields.erase(*I); 15975 (void) Result; 15976 assert(Result); 15977 break; 15978 } 15979 } 15980 if (I == E) 15981 return false; 15982 } 15983 15984 return UnmatchedFields.empty(); 15985 } 15986 15987 static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, 15988 RecordDecl *RD2) { 15989 if (RD1->isUnion() != RD2->isUnion()) 15990 return false; 15991 15992 if (RD1->isUnion()) 15993 return isLayoutCompatibleUnion(C, RD1, RD2); 15994 else 15995 return isLayoutCompatibleStruct(C, RD1, RD2); 15996 } 15997 15998 /// Check if two types are layout-compatible in C++11 sense. 
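/// For example (illustrative): `struct A { int i; };` and `struct B { int j; };`
/// are layout-compatible standard-layout structs, while `struct C { long l; };`
/// is compatible with neither, because `int` and `long` are distinct types.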
15999 static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { 16000 if (T1.isNull() || T2.isNull()) 16001 return false; 16002 16003 // C++11 [basic.types] p11: 16004 // If two types T1 and T2 are the same type, then T1 and T2 are 16005 // layout-compatible types. 16006 if (C.hasSameType(T1, T2)) 16007 return true; 16008 16009 T1 = T1.getCanonicalType().getUnqualifiedType(); 16010 T2 = T2.getCanonicalType().getUnqualifiedType(); 16011 16012 const Type::TypeClass TC1 = T1->getTypeClass(); 16013 const Type::TypeClass TC2 = T2->getTypeClass(); 16014 16015 if (TC1 != TC2) 16016 return false; 16017 16018 if (TC1 == Type::Enum) { 16019 return isLayoutCompatible(C, 16020 cast<EnumType>(T1)->getDecl(), 16021 cast<EnumType>(T2)->getDecl()); 16022 } else if (TC1 == Type::Record) { 16023 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) 16024 return false; 16025 16026 return isLayoutCompatible(C, 16027 cast<RecordType>(T1)->getDecl(), 16028 cast<RecordType>(T2)->getDecl()); 16029 } 16030 16031 return false; 16032 } 16033 16034 //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// 16035 16036 /// Given a type tag expression find the type tag itself. 16037 /// 16038 /// \param TypeExpr Type tag expression, as it appears in user's code. 16039 /// 16040 /// \param VD Declaration of an identifier that appears in a type tag. 16041 /// 16042 /// \param MagicValue Type tag magic value. 16043 /// 16044 /// \param isConstantEvaluated whether the evalaution should be performed in 16045 16046 /// constant context. 16047 static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 16048 const ValueDecl **VD, uint64_t *MagicValue, 16049 bool isConstantEvaluated) { 16050 while(true) { 16051 if (!TypeExpr) 16052 return false; 16053 16054 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 16055 16056 switch (TypeExpr->getStmtClass()) { 16057 case Stmt::UnaryOperatorClass: { 16058 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 16059 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 16060 TypeExpr = UO->getSubExpr(); 16061 continue; 16062 } 16063 return false; 16064 } 16065 16066 case Stmt::DeclRefExprClass: { 16067 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 16068 *VD = DRE->getDecl(); 16069 return true; 16070 } 16071 16072 case Stmt::IntegerLiteralClass: { 16073 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 16074 llvm::APInt MagicValueAPInt = IL->getValue(); 16075 if (MagicValueAPInt.getActiveBits() <= 64) { 16076 *MagicValue = MagicValueAPInt.getZExtValue(); 16077 return true; 16078 } else 16079 return false; 16080 } 16081 16082 case Stmt::BinaryConditionalOperatorClass: 16083 case Stmt::ConditionalOperatorClass: { 16084 const AbstractConditionalOperator *ACO = 16085 cast<AbstractConditionalOperator>(TypeExpr); 16086 bool Result; 16087 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 16088 isConstantEvaluated)) { 16089 if (Result) 16090 TypeExpr = ACO->getTrueExpr(); 16091 else 16092 TypeExpr = ACO->getFalseExpr(); 16093 continue; 16094 } 16095 return false; 16096 } 16097 16098 case Stmt::BinaryOperatorClass: { 16099 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 16100 if (BO->getOpcode() == BO_Comma) { 16101 TypeExpr = BO->getRHS(); 16102 continue; 16103 } 16104 return false; 16105 } 16106 16107 default: 16108 return false; 16109 } 16110 } 16111 } 16112 16113 /// Retrieve the C type corresponding to type tag TypeExpr. 
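/// (Illustrative use: a declaration such as
///    static const MPI_Datatype mpi_datatype_int
///        __attribute__((type_tag_for_datatype(mpi, int)));
/// ties the tag `mpi_datatype_int` to the C type `int`, while a bare integer
/// literal is looked up among the magic values registered via
/// RegisterTypeTagForDatatype.)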
16114 /// 16115 /// \param TypeExpr Expression that specifies a type tag. 16116 /// 16117 /// \param MagicValues Registered magic values. 16118 /// 16119 /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 16120 /// kind. 16121 /// 16122 /// \param TypeInfo Information about the corresponding C type. 16123 /// 16124 /// \param isConstantEvaluated whether the evalaution should be performed in 16125 /// constant context. 16126 /// 16127 /// \returns true if the corresponding C type was found. 16128 static bool GetMatchingCType( 16129 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 16130 const ASTContext &Ctx, 16131 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 16132 *MagicValues, 16133 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 16134 bool isConstantEvaluated) { 16135 FoundWrongKind = false; 16136 16137 // Variable declaration that has type_tag_for_datatype attribute. 16138 const ValueDecl *VD = nullptr; 16139 16140 uint64_t MagicValue; 16141 16142 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 16143 return false; 16144 16145 if (VD) { 16146 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 16147 if (I->getArgumentKind() != ArgumentKind) { 16148 FoundWrongKind = true; 16149 return false; 16150 } 16151 TypeInfo.Type = I->getMatchingCType(); 16152 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 16153 TypeInfo.MustBeNull = I->getMustBeNull(); 16154 return true; 16155 } 16156 return false; 16157 } 16158 16159 if (!MagicValues) 16160 return false; 16161 16162 llvm::DenseMap<Sema::TypeTagMagicValue, 16163 Sema::TypeTagData>::const_iterator I = 16164 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 16165 if (I == MagicValues->end()) 16166 return false; 16167 16168 TypeInfo = I->second; 16169 return true; 16170 } 16171 16172 void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 16173 uint64_t MagicValue, QualType Type, 16174 bool LayoutCompatible, 16175 bool MustBeNull) { 16176 if (!TypeTagForDatatypeMagicValues) 16177 TypeTagForDatatypeMagicValues.reset( 16178 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 16179 16180 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 16181 (*TypeTagForDatatypeMagicValues)[Magic] = 16182 TypeTagData(Type, LayoutCompatible, MustBeNull); 16183 } 16184 16185 static bool IsSameCharType(QualType T1, QualType T2) { 16186 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 16187 if (!BT1) 16188 return false; 16189 16190 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 16191 if (!BT2) 16192 return false; 16193 16194 BuiltinType::Kind T1Kind = BT1->getKind(); 16195 BuiltinType::Kind T2Kind = BT2->getKind(); 16196 16197 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 16198 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 16199 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 16200 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 16201 } 16202 16203 void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 16204 const ArrayRef<const Expr *> ExprArgs, 16205 SourceLocation CallSiteLoc) { 16206 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 16207 bool IsPointerAttr = Attr->getIsPointer(); 16208 16209 // Retrieve the argument representing the 'type_tag'. 
16210 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 16211 if (TypeTagIdxAST >= ExprArgs.size()) { 16212 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16213 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 16214 return; 16215 } 16216 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 16217 bool FoundWrongKind; 16218 TypeTagData TypeInfo; 16219 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 16220 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 16221 TypeInfo, isConstantEvaluated())) { 16222 if (FoundWrongKind) 16223 Diag(TypeTagExpr->getExprLoc(), 16224 diag::warn_type_tag_for_datatype_wrong_kind) 16225 << TypeTagExpr->getSourceRange(); 16226 return; 16227 } 16228 16229 // Retrieve the argument representing the 'arg_idx'. 16230 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 16231 if (ArgumentIdxAST >= ExprArgs.size()) { 16232 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 16233 << 1 << Attr->getArgumentIdx().getSourceIndex(); 16234 return; 16235 } 16236 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 16237 if (IsPointerAttr) { 16238 // Skip implicit cast of pointer to `void *' (as a function argument). 16239 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 16240 if (ICE->getType()->isVoidPointerType() && 16241 ICE->getCastKind() == CK_BitCast) 16242 ArgumentExpr = ICE->getSubExpr(); 16243 } 16244 QualType ArgumentType = ArgumentExpr->getType(); 16245 16246 // Passing a `void*' pointer shouldn't trigger a warning. 16247 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 16248 return; 16249 16250 if (TypeInfo.MustBeNull) { 16251 // Type tag with matching void type requires a null pointer. 16252 if (!ArgumentExpr->isNullPointerConstant(Context, 16253 Expr::NPC_ValueDependentIsNotNull)) { 16254 Diag(ArgumentExpr->getExprLoc(), 16255 diag::warn_type_safety_null_pointer_required) 16256 << ArgumentKind->getName() 16257 << ArgumentExpr->getSourceRange() 16258 << TypeTagExpr->getSourceRange(); 16259 } 16260 return; 16261 } 16262 16263 QualType RequiredType = TypeInfo.Type; 16264 if (IsPointerAttr) 16265 RequiredType = Context.getPointerType(RequiredType); 16266 16267 bool mismatch = false; 16268 if (!TypeInfo.LayoutCompatible) { 16269 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 16270 16271 // C++11 [basic.fundamental] p1: 16272 // Plain char, signed char, and unsigned char are three distinct types. 16273 // 16274 // But we treat plain `char' as equivalent to `signed char' or `unsigned 16275 // char' depending on the current char signedness mode. 
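// For example (illustrative): when plain char is signed, passing a `char *`
// argument where the type tag requires `signed char *` is not flagged.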
16276 if (mismatch) 16277 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 16278 RequiredType->getPointeeType())) || 16279 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 16280 mismatch = false; 16281 } else 16282 if (IsPointerAttr) 16283 mismatch = !isLayoutCompatible(Context, 16284 ArgumentType->getPointeeType(), 16285 RequiredType->getPointeeType()); 16286 else 16287 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 16288 16289 if (mismatch) 16290 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 16291 << ArgumentType << ArgumentKind 16292 << TypeInfo.LayoutCompatible << RequiredType 16293 << ArgumentExpr->getSourceRange() 16294 << TypeTagExpr->getSourceRange(); 16295 } 16296 16297 void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 16298 CharUnits Alignment) { 16299 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 16300 } 16301 16302 void Sema::DiagnoseMisalignedMembers() { 16303 for (MisalignedMember &m : MisalignedMembers) { 16304 const NamedDecl *ND = m.RD; 16305 if (ND->getName().empty()) { 16306 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 16307 ND = TD; 16308 } 16309 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 16310 << m.MD << ND << m.E->getSourceRange(); 16311 } 16312 MisalignedMembers.clear(); 16313 } 16314 16315 void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 16316 E = E->IgnoreParens(); 16317 if (!T->isPointerType() && !T->isIntegerType()) 16318 return; 16319 if (isa<UnaryOperator>(E) && 16320 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 16321 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 16322 if (isa<MemberExpr>(Op)) { 16323 auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 16324 if (MA != MisalignedMembers.end() && 16325 (T->isIntegerType() || 16326 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 16327 Context.getTypeAlignInChars( 16328 T->getPointeeType()) <= MA->Alignment)))) 16329 MisalignedMembers.erase(MA); 16330 } 16331 } 16332 } 16333 16334 void Sema::RefersToMemberWithReducedAlignment( 16335 Expr *E, 16336 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 16337 Action) { 16338 const auto *ME = dyn_cast<MemberExpr>(E); 16339 if (!ME) 16340 return; 16341 16342 // No need to check expressions with an __unaligned-qualified type. 16343 if (E->getType().getQualifiers().hasUnaligned()) 16344 return; 16345 16346 // For a chain of MemberExpr like "a.b.c.d" this list 16347 // will keep FieldDecl's like [d, c, b]. 16348 SmallVector<FieldDecl *, 4> ReverseMemberChain; 16349 const MemberExpr *TopME = nullptr; 16350 bool AnyIsPacked = false; 16351 do { 16352 QualType BaseType = ME->getBase()->getType(); 16353 if (BaseType->isDependentType()) 16354 return; 16355 if (ME->isArrow()) 16356 BaseType = BaseType->getPointeeType(); 16357 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 16358 if (RD->isInvalidDecl()) 16359 return; 16360 16361 ValueDecl *MD = ME->getMemberDecl(); 16362 auto *FD = dyn_cast<FieldDecl>(MD); 16363 // We do not care about non-data members. 
16364 if (!FD || FD->isInvalidDecl()) 16365 return; 16366 16367 AnyIsPacked = 16368 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 16369 ReverseMemberChain.push_back(FD); 16370 16371 TopME = ME; 16372 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 16373 } while (ME); 16374 assert(TopME && "We did not compute a topmost MemberExpr!"); 16375 16376 // Not the scope of this diagnostic. 16377 if (!AnyIsPacked) 16378 return; 16379 16380 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 16381 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 16382 // TODO: The innermost base of the member expression may be too complicated. 16383 // For now, just disregard these cases. This is left for future 16384 // improvement. 16385 if (!DRE && !isa<CXXThisExpr>(TopBase)) 16386 return; 16387 16388 // Alignment expected by the whole expression. 16389 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 16390 16391 // No need to do anything else with this case. 16392 if (ExpectedAlignment.isOne()) 16393 return; 16394 16395 // Synthesize offset of the whole access. 16396 CharUnits Offset; 16397 for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend(); 16398 I++) { 16399 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I)); 16400 } 16401 16402 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 16403 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 16404 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 16405 16406 // The base expression of the innermost MemberExpr may give 16407 // stronger guarantees than the class containing the member. 16408 if (DRE && !TopME->isArrow()) { 16409 const ValueDecl *VD = DRE->getDecl(); 16410 if (!VD->getType()->isReferenceType()) 16411 CompleteObjectAlignment = 16412 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 16413 } 16414 16415 // Check if the synthesized offset fulfills the alignment. 16416 if (Offset % ExpectedAlignment != 0 || 16417 // It may fulfill the offset it but the effective alignment may still be 16418 // lower than the expected expression alignment. 16419 CompleteObjectAlignment < ExpectedAlignment) { 16420 // If this happens, we want to determine a sensible culprit of this. 16421 // Intuitively, watching the chain of member expressions from right to 16422 // left, we start with the required alignment (as required by the field 16423 // type) but some packed attribute in that chain has reduced the alignment. 16424 // It may happen that another packed structure increases it again. But if 16425 // we are here such increase has not been enough. So pointing the first 16426 // FieldDecl that either is packed or else its RecordDecl is, 16427 // seems reasonable. 
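// Illustrative case: given `struct __attribute__((packed)) P { S s; } p;`,
// taking `&p.s.x` reports the field `s`, the first link in the chain whose
// declaration or enclosing record is packed.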
16428 FieldDecl *FD = nullptr; 16429 CharUnits Alignment; 16430 for (FieldDecl *FDI : ReverseMemberChain) { 16431 if (FDI->hasAttr<PackedAttr>() || 16432 FDI->getParent()->hasAttr<PackedAttr>()) { 16433 FD = FDI; 16434 Alignment = std::min( 16435 Context.getTypeAlignInChars(FD->getType()), 16436 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 16437 break; 16438 } 16439 } 16440 assert(FD && "We did not find a packed FieldDecl!"); 16441 Action(E, FD->getParent(), FD, Alignment); 16442 } 16443 } 16444 16445 void Sema::CheckAddressOfPackedMember(Expr *rhs) { 16446 using namespace std::placeholders; 16447 16448 RefersToMemberWithReducedAlignment( 16449 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 16450 _2, _3, _4)); 16451 } 16452 16453 ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 16454 ExprResult CallResult) { 16455 if (checkArgCount(*this, TheCall, 1)) 16456 return ExprError(); 16457 16458 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 16459 if (MatrixArg.isInvalid()) 16460 return MatrixArg; 16461 Expr *Matrix = MatrixArg.get(); 16462 16463 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 16464 if (!MType) { 16465 Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg); 16466 return ExprError(); 16467 } 16468 16469 // Create returned matrix type by swapping rows and columns of the argument 16470 // matrix type. 16471 QualType ResultType = Context.getConstantMatrixType( 16472 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 16473 16474 // Change the return type to the type of the returned matrix. 16475 TheCall->setType(ResultType); 16476 16477 // Update call argument to use the possibly converted matrix argument. 16478 TheCall->setArg(0, Matrix); 16479 return CallResult; 16480 } 16481 16482 // Get and verify the matrix dimensions. 16483 static llvm::Optional<unsigned> 16484 getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 16485 SourceLocation ErrorPos; 16486 Optional<llvm::APSInt> Value = 16487 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 16488 if (!Value) { 16489 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 16490 << Name; 16491 return {}; 16492 } 16493 uint64_t Dim = Value->getZExtValue(); 16494 if (!ConstantMatrixType::isDimensionValid(Dim)) { 16495 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 16496 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 16497 return {}; 16498 } 16499 return Dim; 16500 } 16501 16502 ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 16503 ExprResult CallResult) { 16504 if (!getLangOpts().MatrixTypes) { 16505 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 16506 return ExprError(); 16507 } 16508 16509 if (checkArgCount(*this, TheCall, 4)) 16510 return ExprError(); 16511 16512 unsigned PtrArgIdx = 0; 16513 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16514 Expr *RowsExpr = TheCall->getArg(1); 16515 Expr *ColumnsExpr = TheCall->getArg(2); 16516 Expr *StrideExpr = TheCall->getArg(3); 16517 16518 bool ArgError = false; 16519 16520 // Check pointer argument. 
{ 16522 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 16523 if (PtrConv.isInvalid()) 16524 return PtrConv; 16525 PtrExpr = PtrConv.get(); 16526 TheCall->setArg(0, PtrExpr); 16527 if (PtrExpr->isTypeDependent()) { 16528 TheCall->setType(Context.DependentTy); 16529 return TheCall; 16530 } 16531 } 16532 16533 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 16534 QualType ElementTy; 16535 if (!PtrTy) { 16536 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16537 << PtrArgIdx + 1; 16538 ArgError = true; 16539 } else { 16540 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 16541 16542 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 16543 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16544 << PtrArgIdx + 1; 16545 ArgError = true; 16546 } 16547 } 16548 16549 // Apply default Lvalue conversions and convert the expression to size_t. 16550 auto ApplyArgumentConversions = [this](Expr *E) { 16551 ExprResult Conv = DefaultLvalueConversion(E); 16552 if (Conv.isInvalid()) 16553 return Conv; 16554 16555 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 16556 }; 16557 16558 // Apply conversion to row and column expressions. 16559 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 16560 if (!RowsConv.isInvalid()) { 16561 RowsExpr = RowsConv.get(); 16562 TheCall->setArg(1, RowsExpr); 16563 } else 16564 RowsExpr = nullptr; 16565 16566 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 16567 if (!ColumnsConv.isInvalid()) { 16568 ColumnsExpr = ColumnsConv.get(); 16569 TheCall->setArg(2, ColumnsExpr); 16570 } else 16571 ColumnsExpr = nullptr; 16572 16573 // If any part of the result matrix type is still pending, just use 16574 // Context.DependentTy, until all parts are resolved. 16575 if ((RowsExpr && RowsExpr->isTypeDependent()) || 16576 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 16577 TheCall->setType(Context.DependentTy); 16578 return CallResult; 16579 } 16580 16581 // Check row and column dimensions. 16582 llvm::Optional<unsigned> MaybeRows; 16583 if (RowsExpr) 16584 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 16585 16586 llvm::Optional<unsigned> MaybeColumns; 16587 if (ColumnsExpr) 16588 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 16589 16590 // Check stride argument.
16591 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 16592 if (StrideConv.isInvalid()) 16593 return ExprError(); 16594 StrideExpr = StrideConv.get(); 16595 TheCall->setArg(3, StrideExpr); 16596 16597 if (MaybeRows) { 16598 if (Optional<llvm::APSInt> Value = 16599 StrideExpr->getIntegerConstantExpr(Context)) { 16600 uint64_t Stride = Value->getZExtValue(); 16601 if (Stride < *MaybeRows) { 16602 Diag(StrideExpr->getBeginLoc(), 16603 diag::err_builtin_matrix_stride_too_small); 16604 ArgError = true; 16605 } 16606 } 16607 } 16608 16609 if (ArgError || !MaybeRows || !MaybeColumns) 16610 return ExprError(); 16611 16612 TheCall->setType( 16613 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 16614 return CallResult; 16615 } 16616 16617 ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 16618 ExprResult CallResult) { 16619 if (checkArgCount(*this, TheCall, 3)) 16620 return ExprError(); 16621 16622 unsigned PtrArgIdx = 1; 16623 Expr *MatrixExpr = TheCall->getArg(0); 16624 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 16625 Expr *StrideExpr = TheCall->getArg(2); 16626 16627 bool ArgError = false; 16628 16629 { 16630 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 16631 if (MatrixConv.isInvalid()) 16632 return MatrixConv; 16633 MatrixExpr = MatrixConv.get(); 16634 TheCall->setArg(0, MatrixExpr); 16635 } 16636 if (MatrixExpr->isTypeDependent()) { 16637 TheCall->setType(Context.DependentTy); 16638 return TheCall; 16639 } 16640 16641 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 16642 if (!MatrixTy) { 16643 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0; 16644 ArgError = true; 16645 } 16646 16647 { 16648 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 16649 if (PtrConv.isInvalid()) 16650 return PtrConv; 16651 PtrExpr = PtrConv.get(); 16652 TheCall->setArg(1, PtrExpr); 16653 if (PtrExpr->isTypeDependent()) { 16654 TheCall->setType(Context.DependentTy); 16655 return TheCall; 16656 } 16657 } 16658 16659 // Check pointer argument. 16660 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 16661 if (!PtrTy) { 16662 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg) 16663 << PtrArgIdx + 1; 16664 ArgError = true; 16665 } else { 16666 QualType ElementTy = PtrTy->getPointeeType(); 16667 if (ElementTy.isConstQualified()) { 16668 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 16669 ArgError = true; 16670 } 16671 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 16672 if (MatrixTy && 16673 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 16674 Diag(PtrExpr->getBeginLoc(), 16675 diag::err_builtin_matrix_pointer_arg_mismatch) 16676 << ElementTy << MatrixTy->getElementType(); 16677 ArgError = true; 16678 } 16679 } 16680 16681 // Apply default Lvalue conversions and convert the stride expression to 16682 // size_t. 16683 { 16684 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 16685 if (StrideConv.isInvalid()) 16686 return StrideConv; 16687 16688 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 16689 if (StrideConv.isInvalid()) 16690 return StrideConv; 16691 StrideExpr = StrideConv.get(); 16692 TheCall->setArg(2, StrideExpr); 16693 } 16694 16695 // Check stride argument. 
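// For example (illustrative): storing a 4x4 matrix with
// `__builtin_matrix_column_major_store(M, Ptr, /*Stride=*/2)` is rejected,
// since the stride must be at least the number of rows of the matrix.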
16696 if (MatrixTy) { 16697 if (Optional<llvm::APSInt> Value = 16698 StrideExpr->getIntegerConstantExpr(Context)) { 16699 uint64_t Stride = Value->getZExtValue(); 16700 if (Stride < MatrixTy->getNumRows()) { 16701 Diag(StrideExpr->getBeginLoc(), 16702 diag::err_builtin_matrix_stride_too_small); 16703 ArgError = true; 16704 } 16705 } 16706 } 16707 16708 if (ArgError) 16709 return ExprError(); 16710 16711 return CallResult; 16712 } 16713 16714 /// \brief Enforce the bounds of a TCB 16715 /// CheckTCBEnforcement - Enforces that every function in a named TCB only 16716 /// directly calls other functions in the same TCB as marked by the enforce_tcb 16717 /// and enforce_tcb_leaf attributes. 16718 void Sema::CheckTCBEnforcement(const CallExpr *TheCall, 16719 const FunctionDecl *Callee) { 16720 const FunctionDecl *Caller = getCurFunctionDecl(); 16721 16722 // Calls to builtins are not enforced. 16723 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() || 16724 Callee->getBuiltinID() != 0) 16725 return; 16726 16727 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 16728 // all TCBs the callee is a part of. 16729 llvm::StringSet<> CalleeTCBs; 16730 for_each(Callee->specific_attrs<EnforceTCBAttr>(), 16731 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 16732 for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(), 16733 [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); }); 16734 16735 // Go through the TCBs the caller is a part of and emit warnings if Caller 16736 // is in a TCB that the Callee is not. 16737 for_each( 16738 Caller->specific_attrs<EnforceTCBAttr>(), 16739 [&](const auto *A) { 16740 StringRef CallerTCB = A->getTCBName(); 16741 if (CalleeTCBs.count(CallerTCB) == 0) { 16742 this->Diag(TheCall->getExprLoc(), 16743 diag::warn_tcb_enforcement_violation) << Callee 16744 << CallerTCB; 16745 } 16746 }); 16747 } 16748