//===----- CGOpenMPRuntime.cpp - Interface to OpenMP Runtimes -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

using namespace clang;
using namespace CodeGen;

namespace {
/// \brief Base class for handling code generation inside OpenMP regions.
/// Subclasses distinguish how the region's body is materialized (outlined
/// function vs. inlined code) and where the thread-id value lives.
class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
public:
  /// \brief Kinds of OpenMP regions used in codegen.
  enum CGOpenMPRegionKind {
    /// \brief Region with outlined function for standalone 'parallel'
    /// directive.
    ParallelOutlinedRegion,
    /// \brief Region with outlined function for standalone 'task' directive.
    TaskOutlinedRegion,
    /// \brief Region for constructs that do not require function outlining,
    /// like 'for', 'sections', 'atomic' etc. directives.
    InlinedRegion,
    /// \brief Region with outlined function for standalone 'target' directive.
    TargetRegion,
  };

  /// Constructor for regions backed by a captured statement (outlined and
  /// target regions).
  CGOpenMPRegionInfo(const CapturedStmt &CS,
                     const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CS, CR_OpenMP), RegionKind(RegionKind),
        CodeGen(CodeGen), Kind(Kind), HasCancel(HasCancel) {}

  /// Constructor for regions that have no captured statement of their own
  /// (inlined regions delegate to the enclosing region's capture info).
  CGOpenMPRegionInfo(const CGOpenMPRegionKind RegionKind,
                     const RegionCodeGenTy &CodeGen, OpenMPDirectiveKind Kind,
                     bool HasCancel)
      : CGCapturedStmtInfo(CR_OpenMP), RegionKind(RegionKind), CodeGen(CodeGen),
        Kind(Kind), HasCancel(HasCancel) {}

  /// \brief Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  virtual const VarDecl *getThreadIDVariable() const = 0;

  /// \brief Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override;

  /// \brief Get an LValue for the current ThreadID variable.
  /// \return LValue for thread id variable. This LValue always has type int32*.
  virtual LValue getThreadIDVariableLValue(CodeGenFunction &CGF);

  /// Emit the scheduling point for an untied task; a no-op for every region
  /// kind that is not an untied task region.
  virtual void emitUntiedSwitch(CodeGenFunction & /*CGF*/) {}

  CGOpenMPRegionKind getRegionKind() const { return RegionKind; }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  bool hasCancel() const { return HasCancel; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return Info->getKind() == CR_OpenMP;
  }

  ~CGOpenMPRegionInfo() override = default;

protected:
  CGOpenMPRegionKind RegionKind;
  RegionCodeGenTy CodeGen;
  OpenMPDirectiveKind Kind;
  bool HasCancel;
};

/// \brief API for captured statement code generation in OpenMP constructs.
class CGOpenMPOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPOutlinedRegionInfo(const CapturedStmt &CS, const VarDecl *ThreadIDVar,
                             const RegionCodeGenTy &CodeGen,
                             OpenMPDirectiveKind Kind, bool HasCancel,
                             StringRef HelperName)
      : CGOpenMPRegionInfo(CS, ParallelOutlinedRegion, CodeGen, Kind,
                           HasCancel),
        ThreadIDVar(ThreadIDVar), HelperName(HelperName) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// \brief Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// \brief Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               ParallelOutlinedRegion;
  }

private:
  /// \brief A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Name used for the generated outlined helper function.
  StringRef HelperName;
};

/// \brief API for captured statement code generation in OpenMP constructs.
class CGOpenMPTaskOutlinedRegionInfo final : public CGOpenMPRegionInfo {
public:
  /// Pre/post action that emits the part-id dispatch switch needed by
  /// untied tasks: each scheduling point becomes a switch case so a resumed
  /// task can jump back to where it left off.
  class UntiedTaskActionTy final : public PrePostActionTy {
    bool Untied;
    const VarDecl *PartIDVar;
    const RegionCodeGenTy UntiedCodeGen;
    llvm::SwitchInst *UntiedSwitch = nullptr;

  public:
    UntiedTaskActionTy(bool Tied, const VarDecl *PartIDVar,
                       const RegionCodeGenTy &UntiedCodeGen)
        : Untied(!Tied), PartIDVar(PartIDVar), UntiedCodeGen(UntiedCodeGen) {}
    void Enter(CodeGenFunction &CGF) override {
      if (Untied) {
        // Emit task switching point.
        auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        auto *Res = CGF.EmitLoadOfScalar(PartIdLVal, SourceLocation());
        auto *DoneBB = CGF.createBasicBlock(".untied.done.");
        // Dispatch on the saved part id; default falls through to done.
        UntiedSwitch = CGF.Builder.CreateSwitch(Res, DoneBB);
        CGF.EmitBlock(DoneBB);
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        // Case 0 is the task entry: start executing from the top.
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(0),
                              CGF.Builder.GetInsertBlock());
        emitUntiedSwitch(CGF);
      }
    }
    void emitUntiedSwitch(CodeGenFunction &CGF) const {
      if (Untied) {
        // Save the next part id so a rescheduled task resumes here.
        auto PartIdLVal = CGF.EmitLoadOfPointerLValue(
            CGF.GetAddrOfLocalVar(PartIDVar),
            PartIDVar->getType()->castAs<PointerType>());
        CGF.EmitStoreOfScalar(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              PartIdLVal);
        UntiedCodeGen(CGF);
        CodeGenFunction::JumpDest CurPoint =
            CGF.getJumpDestInCurrentScope(".untied.next.");
        CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
        // Register the resume point as the next switch case.
        CGF.EmitBlock(CGF.createBasicBlock(".untied.jmp."));
        UntiedSwitch->addCase(CGF.Builder.getInt32(UntiedSwitch->getNumCases()),
                              CGF.Builder.GetInsertBlock());
        CGF.EmitBranchThroughCleanup(CurPoint);
        CGF.EmitBlock(CurPoint.getBlock());
      }
    }
    unsigned getNumberOfParts() const { return UntiedSwitch->getNumCases(); }
  };
  CGOpenMPTaskOutlinedRegionInfo(const CapturedStmt &CS,
                                 const VarDecl *ThreadIDVar,
                                 const RegionCodeGenTy &CodeGen,
                                 OpenMPDirectiveKind Kind, bool HasCancel,
                                 const UntiedTaskActionTy &Action)
      : CGOpenMPRegionInfo(CS, TaskOutlinedRegion, CodeGen, Kind, HasCancel),
        ThreadIDVar(ThreadIDVar), Action(Action) {
    assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
  }

  /// \brief Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override { return ThreadIDVar; }

  /// \brief Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override;

  /// \brief Get the name of the capture helper.
  StringRef getHelperName() const override { return ".omp_outlined."; }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    Action.emitUntiedSwitch(CGF);
  }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() ==
               TaskOutlinedRegion;
  }

private:
  /// \brief A variable or parameter storing global thread id for OpenMP
  /// constructs.
  const VarDecl *ThreadIDVar;
  /// Action for emitting code for untied tasks.
  const UntiedTaskActionTy &Action;
};

/// \brief API for inlined captured statement code generation in OpenMP
/// constructs. Most queries are forwarded to the enclosing (outer) region
/// info when one exists.
class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
public:
  CGOpenMPInlinedRegionInfo(CodeGenFunction::CGCapturedStmtInfo *OldCSI,
                            const RegionCodeGenTy &CodeGen,
                            OpenMPDirectiveKind Kind, bool HasCancel)
      : CGOpenMPRegionInfo(InlinedRegion, CodeGen, Kind, HasCancel),
        OldCSI(OldCSI),
        OuterRegionInfo(dyn_cast_or_null<CGOpenMPRegionInfo>(OldCSI)) {}

  // \brief Retrieve the value of the context parameter.
  llvm::Value *getContextValue() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getContextValue();
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  void setContextValue(llvm::Value *V) override {
    if (OuterRegionInfo) {
      OuterRegionInfo->setContextValue(V);
      return;
    }
    llvm_unreachable("No context value for inlined OpenMP region");
  }

  /// \brief Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->lookup(VD);
    // If there is no outer outlined region, no need to lookup in a list of
    // captured variables, we can use the original one.
    return nullptr;
  }

  FieldDecl *getThisFieldDecl() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThisFieldDecl();
    return nullptr;
  }

  /// \brief Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariable();
    return nullptr;
  }

  /// \brief Get an LValue for the current ThreadID variable.
  LValue getThreadIDVariableLValue(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      return OuterRegionInfo->getThreadIDVariableLValue(CGF);
    llvm_unreachable("No LValue for inlined OpenMP construct");
  }

  /// \brief Get the name of the capture helper.
  StringRef getHelperName() const override {
    if (auto *OuterRegionInfo = getOldCSI())
      return OuterRegionInfo->getHelperName();
    llvm_unreachable("No helper name for inlined OpenMP construct");
  }

  void emitUntiedSwitch(CodeGenFunction &CGF) override {
    if (OuterRegionInfo)
      OuterRegionInfo->emitUntiedSwitch(CGF);
  }

  CodeGenFunction::CGCapturedStmtInfo *getOldCSI() const { return OldCSI; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == InlinedRegion;
  }

  ~CGOpenMPInlinedRegionInfo() override = default;

private:
  /// \brief CodeGen info about outer OpenMP region.
  CodeGenFunction::CGCapturedStmtInfo *OldCSI;
  /// Same pointer as OldCSI when the outer info is an OpenMP region;
  /// null otherwise.
  CGOpenMPRegionInfo *OuterRegionInfo;
};

/// \brief API for captured statement code generation in OpenMP target
/// constructs.
/// For these captures, implicit parameters are used instead of the
/// captured fields. The name of the target region has to be unique in a given
/// application so it is provided by the client, because only the client has
/// the information to generate that.
class CGOpenMPTargetRegionInfo final : public CGOpenMPRegionInfo {
public:
  CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
                           const RegionCodeGenTy &CodeGen, StringRef HelperName)
      : CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
                           /*HasCancel=*/false),
        HelperName(HelperName) {}

  /// \brief This is unused for target regions because each starts executing
  /// with a single thread.
  const VarDecl *getThreadIDVariable() const override { return nullptr; }

  /// \brief Get the name of the capture helper.
  StringRef getHelperName() const override { return HelperName; }

  static bool classof(const CGCapturedStmtInfo *Info) {
    return CGOpenMPRegionInfo::classof(Info) &&
           cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
  }

private:
  /// Client-provided, application-unique name of the target region.
  StringRef HelperName;
};

/// Placeholder codegen callback for regions that must never emit a body.
static void EmptyCodeGen(CodeGenFunction &, PrePostActionTy &) {
  llvm_unreachable("No codegen for expressions");
}
/// \brief API for generation of expressions captured in an innermost OpenMP
/// region.
class CGOpenMPInnerExprInfo final : public CGOpenMPInlinedRegionInfo {
public:
  CGOpenMPInnerExprInfo(CodeGenFunction &CGF, const CapturedStmt &CS)
      : CGOpenMPInlinedRegionInfo(CGF.CapturedStmtInfo, EmptyCodeGen,
                                  OMPD_unknown,
                                  /*HasCancel=*/false),
        PrivScope(CGF) {
    // Make sure the globals captured in the provided statement are local by
    // using the privatization logic. We assume the same variable is not
    // captured more than once.
    for (auto &C : CS.captures()) {
      if (!C.capturesVariable() && !C.capturesVariableByCopy())
        continue;

      const VarDecl *VD = C.getCapturedVar();
      if (VD->isLocalVarDeclOrParm())
        continue;

      DeclRefExpr DRE(const_cast<VarDecl *>(VD),
                      /*RefersToEnclosingVariableOrCapture=*/false,
                      VD->getType().getNonReferenceType(), VK_LValue,
                      SourceLocation());
      PrivScope.addPrivate(VD, [&CGF, &DRE]() -> Address {
        return CGF.EmitLValue(&DRE).getAddress();
      });
    }
    (void)PrivScope.Privatize();
  }

  /// \brief Lookup the captured field decl for a variable.
  const FieldDecl *lookup(const VarDecl *VD) const override {
    if (auto *FD = CGOpenMPInlinedRegionInfo::lookup(VD))
      return FD;
    return nullptr;
  }

  /// \brief Emit the captured statement body.
  void EmitBody(CodeGenFunction &CGF, const Stmt *S) override {
    llvm_unreachable("No body for expressions");
  }

  /// \brief Get a variable or parameter for storing global thread id
  /// inside OpenMP construct.
  const VarDecl *getThreadIDVariable() const override {
    llvm_unreachable("No thread id for expressions");
  }

  /// \brief Get the name of the capture helper.
  StringRef getHelperName() const override {
    llvm_unreachable("No helper name for expressions");
  }

  static bool classof(const CGCapturedStmtInfo *Info) { return false; }

private:
  /// Private scope to capture global variables.
  CodeGenFunction::OMPPrivateScope PrivScope;
};

/// \brief RAII for emitting code of OpenMP constructs. Installs an inlined
/// region info on the CodeGenFunction and stashes lambda/block capture state
/// for the duration of the region; the destructor restores everything.
class InlinedOpenMPRegionRAII {
  CodeGenFunction &CGF;
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField = nullptr;
  const CodeGen::CGBlockInfo *BlockInfo = nullptr;

public:
  /// \brief Constructs region for combined constructs.
  /// \param CodeGen Code generation sequence for combined directives. Includes
  /// a list of functions used for code generation of implicitly inlined
  /// regions.
  InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
                          OpenMPDirectiveKind Kind, bool HasCancel)
      : CGF(CGF) {
    // Start emission for the construct.
    CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
        CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    LambdaThisCaptureField = CGF.LambdaThisCaptureField;
    CGF.LambdaThisCaptureField = nullptr;
    BlockInfo = CGF.BlockInfo;
    CGF.BlockInfo = nullptr;
  }

  ~InlinedOpenMPRegionRAII() {
    // Restore original CapturedStmtInfo only if we're done with code emission.
    auto *OldCSI =
        cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
    delete CGF.CapturedStmtInfo;
    CGF.CapturedStmtInfo = OldCSI;
    std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
    CGF.LambdaThisCaptureField = LambdaThisCaptureField;
    CGF.BlockInfo = BlockInfo;
  }
};

/// \brief Values for bit flags used in the ident_t to describe the fields.
/// All enumerated elements are named and described in accordance with the code
/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
  /// \brief Use trampoline for internal microtask.
  OMP_IDENT_IMD = 0x01,
  /// \brief Use c-style ident structure.
  OMP_IDENT_KMPC = 0x02,
  /// \brief Atomic reduction option for kmpc_reduce.
  OMP_ATOMIC_REDUCE = 0x10,
  /// \brief Explicit 'barrier' directive.
  OMP_IDENT_BARRIER_EXPL = 0x20,
  /// \brief Implicit barrier in code.
  OMP_IDENT_BARRIER_IMPL = 0x40,
  /// \brief Implicit barrier in 'for' directive.
  OMP_IDENT_BARRIER_IMPL_FOR = 0x40,
  /// \brief Implicit barrier in 'sections' directive.
  OMP_IDENT_BARRIER_IMPL_SECTIONS = 0xC0,
  /// \brief Implicit barrier in 'single' directive.
  OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140,
  /// Call of __kmp_for_static_init for static loop.
  OMP_IDENT_WORK_LOOP = 0x200,
  /// Call of __kmp_for_static_init for sections.
  OMP_IDENT_WORK_SECTIONS = 0x400,
  /// Call of __kmp_for_static_init for distribute.
  OMP_IDENT_WORK_DISTRIBUTE = 0x800,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};

/// \brief Describes ident structure that describes a source location.
/// All descriptions are taken from
/// http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
///    kmp_int32 reserved_1;   /**<  might be used in Fortran;
///                                  see above  */
///    kmp_int32 flags;        /**<  also f.flags; KMP_IDENT_xxx flags;
///                                  KMP_IDENT_KMPC identifies this union
///                                  member  */
///    kmp_int32 reserved_2;   /**<  not really used in Fortran any more;
///                                  see above */
///#if USE_ITT_BUILD
///                            /*  but currently used for storing
///                                region-specific ITT */
///                            /*  contextual information. */
///#endif /* USE_ITT_BUILD */
///    kmp_int32 reserved_3;   /**< source[4] in Fortran, do not use for
///                                 C++  */
///    char const *psource;    /**< String describing the source location.
///                            The string is composed of semi-colon separated
///                            fields which describe the source file, the
///                            function and a pair of line numbers that
///                            delimit the construct.
///                            */
/// } ident_t;
enum IdentFieldIndex {
  /// \brief might be used in Fortran
  IdentField_Reserved_1,
  /// \brief OMP_IDENT_xxx flags; OMP_IDENT_KMPC identifies this union member.
  IdentField_Flags,
  /// \brief Not really used in Fortran any more
  IdentField_Reserved_2,
  /// \brief Source[4] in Fortran, do not use for C++
  IdentField_Reserved_3,
  /// \brief String describing the source location. The string is composed of
  /// semi-colon separated fields which describe the source file, the function
  /// and a pair of line numbers that delimit the construct.
  IdentField_PSource
};

/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
/// the enum sched_type in kmp.h).
enum OpenMPSchedType {
  /// \brief Lower bound for default (unordered) versions.
  OMP_sch_lower = 32,
  OMP_sch_static_chunked = 33,
  OMP_sch_static = 34,
  OMP_sch_dynamic_chunked = 35,
  OMP_sch_guided_chunked = 36,
  OMP_sch_runtime = 37,
  OMP_sch_auto = 38,
  /// static with chunk adjustment (e.g., simd)
  OMP_sch_static_balanced_chunked = 45,
  /// \brief Lower bound for 'ordered' versions.
  OMP_ord_lower = 64,
  OMP_ord_static_chunked = 65,
  OMP_ord_static = 66,
  OMP_ord_dynamic_chunked = 67,
  OMP_ord_guided_chunked = 68,
  OMP_ord_runtime = 69,
  OMP_ord_auto = 70,
  OMP_sch_default = OMP_sch_static,
  /// \brief dist_schedule types
  OMP_dist_sch_static_chunked = 91,
  OMP_dist_sch_static = 92,
  /// Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
  /// Set if the monotonic schedule modifier was present.
  OMP_sch_modifier_monotonic = (1 << 29),
  /// Set if the nonmonotonic schedule modifier was present.
  OMP_sch_modifier_nonmonotonic = (1 << 30),
};

/// Identifiers for the OpenMP runtime entry points the CodeGen emits calls
/// to; each enumerator documents the C signature of the runtime function.
enum OpenMPRTLFunction {
  /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
  /// kmpc_micro microtask, ...);
  OMPRTL__kmpc_fork_call,
  /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
  OMPRTL__kmpc_threadprivate_cached,
  /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
  OMPRTL__kmpc_threadprivate_register,
  // Call to kmp_int32 __kmpc_global_thread_num(ident_t *loc);
  OMPRTL__kmpc_global_thread_num,
  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *crit);
  OMPRTL__kmpc_critical,
  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
  // global_tid, kmp_critical_name *crit, uintptr_t hint);
  OMPRTL__kmpc_critical_with_hint,
  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *crit);
  OMPRTL__kmpc_end_critical,
  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_cancel_barrier,
  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_barrier,
  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_for_static_fini,
  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_serialized_parallel,
  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_end_serialized_parallel,
  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_threads);
  OMPRTL__kmpc_push_num_threads,
  // Call to void __kmpc_flush(ident_t *loc);
  OMPRTL__kmpc_flush,
  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_master,
  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_master,
  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
  // int end_part);
  OMPRTL__kmpc_omp_taskyield,
  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_single,
  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
  OMPRTL__kmpc_end_single,
  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  OMPRTL__kmpc_omp_task_alloc,
  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
  // new_task);
  OMPRTL__kmpc_omp_task,
  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
  // kmp_int32 didit);
  OMPRTL__kmpc_copyprivate,
  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
  OMPRTL__kmpc_reduce,
  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
  // *lck);
  OMPRTL__kmpc_reduce_nowait,
  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce,
  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
  // kmp_critical_name *lck);
  OMPRTL__kmpc_end_reduce_nowait,
  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
  // kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_begin_if0,
  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
  // kmp_task_t * new_task);
  OMPRTL__kmpc_omp_task_complete_if0,
  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_ordered,
  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_ordered,
  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  OMPRTL__kmpc_omp_taskwait,
  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_taskgroup,
  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
  OMPRTL__kmpc_end_taskgroup,
  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
  // int proc_bind);
  OMPRTL__kmpc_push_proc_bind,
  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_task_with_deps,
  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
  OMPRTL__kmpc_omp_wait_deps,
  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancellationpoint,
  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  OMPRTL__kmpc_cancel,
  // Call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 num_teams, kmp_int32 thread_limit);
  OMPRTL__kmpc_push_num_teams,
  // Call to void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
  // microtask, ...);
  OMPRTL__kmpc_fork_teams,
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  OMPRTL__kmpc_taskloop,
  // Call to void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
  // num_dims, struct kmp_dim *dims);
  OMPRTL__kmpc_doacross_init,
  // Call to void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
  OMPRTL__kmpc_doacross_fini,
  // Call to void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
  // *vec);
  OMPRTL__kmpc_doacross_post,
  // Call to void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
  // *vec);
  OMPRTL__kmpc_doacross_wait,
  // Call to void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  OMPRTL__kmpc_task_reduction_init,
  // Call to void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  // *d);
  OMPRTL__kmpc_task_reduction_get_th_data,

  //
  // Offloading related calls
  //
  // Call to int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target,
  // Call to int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_nowait,
  // Call to int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
  // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams,
  // Call to int32_t __tgt_target_teams_nowait(int64_t device_id, void
  // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
  // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
  OMPRTL__tgt_target_teams_nowait,
  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_register_lib,
  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_unregister_lib,
  // Call to void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_begin,
  // Call to void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_begin_nowait,
  // Call to void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_end,
  // Call to void __tgt_target_data_end_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_end_nowait,
  // Call to void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
  // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
  OMPRTL__tgt_target_data_update,
  // Call to void __tgt_target_data_update_nowait(int64_t device_id, int32_t
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
  // *arg_types);
  OMPRTL__tgt_target_data_update_nowait,
};

/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
720 class CleanupTy final : public EHScopeStack::Cleanup { 721 PrePostActionTy *Action; 722 723 public: 724 explicit CleanupTy(PrePostActionTy *Action) : Action(Action) {} 725 void Emit(CodeGenFunction &CGF, Flags /*flags*/) override { 726 if (!CGF.HaveInsertPoint()) 727 return; 728 Action->Exit(CGF); 729 } 730 }; 731 732 } // anonymous namespace 733 734 void RegionCodeGenTy::operator()(CodeGenFunction &CGF) const { 735 CodeGenFunction::RunCleanupsScope Scope(CGF); 736 if (PrePostAction) { 737 CGF.EHStack.pushCleanup<CleanupTy>(NormalAndEHCleanup, PrePostAction); 738 Callback(CodeGen, CGF, *PrePostAction); 739 } else { 740 PrePostActionTy Action; 741 Callback(CodeGen, CGF, Action); 742 } 743 } 744 745 /// Check if the combiner is a call to UDR combiner and if it is so return the 746 /// UDR decl used for reduction. 747 static const OMPDeclareReductionDecl * 748 getReductionInit(const Expr *ReductionOp) { 749 if (auto *CE = dyn_cast<CallExpr>(ReductionOp)) 750 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee())) 751 if (auto *DRE = 752 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts())) 753 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) 754 return DRD; 755 return nullptr; 756 } 757 758 static void emitInitWithReductionInitializer(CodeGenFunction &CGF, 759 const OMPDeclareReductionDecl *DRD, 760 const Expr *InitOp, 761 Address Private, Address Original, 762 QualType Ty) { 763 if (DRD->getInitializer()) { 764 std::pair<llvm::Function *, llvm::Function *> Reduction = 765 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD); 766 auto *CE = cast<CallExpr>(InitOp); 767 auto *OVE = cast<OpaqueValueExpr>(CE->getCallee()); 768 const Expr *LHS = CE->getArg(/*Arg=*/0)->IgnoreParenImpCasts(); 769 const Expr *RHS = CE->getArg(/*Arg=*/1)->IgnoreParenImpCasts(); 770 auto *LHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(LHS)->getSubExpr()); 771 auto *RHSDRE = cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr()); 772 
CodeGenFunction::OMPPrivateScope PrivateScope(CGF); 773 PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()), 774 [=]() -> Address { return Private; }); 775 PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()), 776 [=]() -> Address { return Original; }); 777 (void)PrivateScope.Privatize(); 778 RValue Func = RValue::get(Reduction.second); 779 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func); 780 CGF.EmitIgnoredExpr(InitOp); 781 } else { 782 llvm::Constant *Init = CGF.CGM.EmitNullConstant(Ty); 783 auto *GV = new llvm::GlobalVariable( 784 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, 785 llvm::GlobalValue::PrivateLinkage, Init, ".init"); 786 LValue LV = CGF.MakeNaturalAlignAddrLValue(GV, Ty); 787 RValue InitRVal; 788 switch (CGF.getEvaluationKind(Ty)) { 789 case TEK_Scalar: 790 InitRVal = CGF.EmitLoadOfLValue(LV, SourceLocation()); 791 break; 792 case TEK_Complex: 793 InitRVal = 794 RValue::getComplex(CGF.EmitLoadOfComplex(LV, SourceLocation())); 795 break; 796 case TEK_Aggregate: 797 InitRVal = RValue::getAggregate(LV.getAddress()); 798 break; 799 } 800 OpaqueValueExpr OVE(SourceLocation(), Ty, VK_RValue); 801 CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal); 802 CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(), 803 /*IsInitializer=*/false); 804 } 805 } 806 807 /// \brief Emit initialization of arrays of complex types. 808 /// \param DestAddr Address of the array. 809 /// \param Type Type of array. 810 /// \param Init Initial expression of array. 811 /// \param SrcAddr Address of the original array. 812 static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr, 813 QualType Type, bool EmitDeclareReductionInit, 814 const Expr *Init, 815 const OMPDeclareReductionDecl *DRD, 816 Address SrcAddr = Address::invalid()) { 817 // Perform element-by-element initialization. 818 QualType ElementTy; 819 820 // Drill down to the base element type on both arrays. 
  auto ArrayTy = Type->getAsArrayTypeUnsafe();
  // Also computes ElementTy (the scalar element type) as a side effect.
  auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
  DestAddr =
      CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
  if (DRD)
    // Source is only needed for UDR initializers (omp_orig).
    SrcAddr =
        CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());

  llvm::Value *SrcBegin = nullptr;
  if (DRD)
    SrcBegin = SrcAddr.getPointer();
  auto DestBegin = DestAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
  auto DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
  auto IsEmpty =
      CGF.Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arrayinit.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHIs track the current element in dest (and src, for UDRs) across loop
  // iterations; the second incoming edge is added after the body is emitted.
  llvm::PHINode *SrcElementPHI = nullptr;
  Address SrcElementCurrent = Address::invalid();
  if (DRD) {
    SrcElementPHI = CGF.Builder.CreatePHI(SrcBegin->getType(), 2,
                                          "omp.arraycpy.srcElementPast");
    SrcElementPHI->addIncoming(SrcBegin, EntryBB);
    SrcElementCurrent =
        Address(SrcElementPHI,
                SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
  }
  llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI,
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  {
    // Scope any temporaries created while initializing one element.
    CodeGenFunction::RunCleanupsScope InitScope(CGF);
    if (EmitDeclareReductionInit) {
      emitInitWithReductionInitializer(CGF, DRD, Init, DestElementCurrent,
                                       SrcElementCurrent, ElementTy);
    } else
      CGF.EmitAnyExprToMem(Init, DestElementCurrent, ElementTy.getQualifiers(),
                           /*IsInitializer=*/false);
  }

  if (DRD) {
    // Shift the address forward by one element.
    // NOTE(review): the IR value name below says "dest" but this advances the
    // *source* pointer — cosmetic copy-paste in the name only.
    auto SrcElementNext = CGF.Builder.CreateConstGEP1_32(
        SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
    SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
  }

  // Shift the address forward by one element.
  auto DestElementNext = CGF.Builder.CreateConstGEP1_32(
      DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  // Check whether we've reached the end.
  auto Done =
      CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

/// Emit the lvalue for the shared (original) reduction item.
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
  return CGF.EmitOMPSharedLValue(E);
}

/// Emit the lvalue for the upper bound of an array-section reduction item;
/// returns an invalid LValue for non-section expressions.
LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
                                            const Expr *E) {
  if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E))
    return CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false);
  return LValue();
}

/// Emit initialization of the N-th aggregate (array) reduction item,
/// delegating per-element work to EmitOMPAggregateInit.
void ReductionCodeGen::emitAggregateInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    const OMPDeclareReductionDecl *DRD) {
  // Emit VarDecl with copy init for arrays.
  // Get the address of the original variable captured in current
  // captured region.
  auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  // Use the UDR path when the UDR has an initializer or the private copy has
  // no init of its own; otherwise fall back to the private VarDecl's init.
  bool EmitDeclareReductionInit =
      DRD && (DRD->getInitializer() || !PrivateVD->hasInit());
  EmitOMPAggregateInit(CGF, PrivateAddr, PrivateVD->getType(),
                       EmitDeclareReductionInit,
                       EmitDeclareReductionInit ? ClausesData[N].ReductionOp
                                                : PrivateVD->getInit(),
                       DRD, SharedLVal.getAddress());
}

/// Record per-clause (shared, private, reduction-op) triples; Privates and
/// ReductionOps are expected to parallel Shareds one-to-one.
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
                                   ArrayRef<const Expr *> Privates,
                                   ArrayRef<const Expr *> ReductionOps) {
  ClausesData.reserve(Shareds.size());
  SharedAddresses.reserve(Shareds.size());
  Sizes.reserve(Shareds.size());
  BaseDecls.reserve(Shareds.size());
  auto IPriv = Privates.begin();
  auto IRed = ReductionOps.begin();
  for (const auto *Ref : Shareds) {
    ClausesData.emplace_back(Ref, *IPriv, *IRed);
    std::advance(IPriv, 1);
    std::advance(IRed, 1);
  }
}

/// Emit (and cache) the shared lvalue pair (begin, section upper bound) for
/// item N. Must be called in order: exactly N entries must already exist.
void ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, unsigned N) {
  assert(SharedAddresses.size() == N &&
         "Number of generated lvalues must be exactly N.");
  LValue First = emitSharedLValue(CGF, ClausesData[N].Ref);
  LValue Second = emitSharedLValueUB(CGF, ClausesData[N].Ref);
  SharedAddresses.emplace_back(First, Second);
}

/// Compute and cache the size of reduction item N. For VLA-typed items the
/// element count is computed at runtime (array section: ptrdiff+1; otherwise
/// bytes / sizeof(element)) and bound to the VLA size expression.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
  auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
  if (!PrivateType->isVariablyModifiedType()) {
    // Constant-sized item: size known statically, no element count needed.
    Sizes.emplace_back(
        CGF.getTypeSize(
            SharedAddresses[N].first.getType().getNonReferenceType()),
        nullptr);
    return;
  }
  llvm::Value *Size;
  llvm::Value *SizeInChars;
  llvm::Type *ElemType =
      cast<llvm::PointerType>(SharedAddresses[N].first.getPointer()->getType())
          ->getElementType();
  auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
  if (AsArraySection) {
    // Section length = (UB - LB) + 1 elements.
    Size = CGF.Builder.CreatePtrDiff(SharedAddresses[N].second.getPointer(),
                                     SharedAddresses[N].first.getPointer());
    Size = CGF.Builder.CreateNUWAdd(
        Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
    SizeInChars = CGF.Builder.CreateNUWMul(Size, ElemSizeOf);
  } else {
    SizeInChars = CGF.getTypeSize(
        SharedAddresses[N].first.getType().getNonReferenceType());
    Size = CGF.Builder.CreateExactUDiv(SizeInChars, ElemSizeOf);
  }
  Sizes.emplace_back(SizeInChars, Size);
  // Bind the computed element count to the VLA's size expression so the
  // variably-modified private type can be emitted.
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}

/// Overload taking a pre-computed element count (e.g. reloaded from a task
/// shareds record); only meaningful for variably-modified private types.
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
                                         llvm::Value *Size) {
  auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  if (!PrivateType->isVariablyModifiedType()) {
    assert(!Size && !Sizes[N].second &&
           "Size should be nullptr for non-variably modified reduction "
           "items.");
    return;
  }
  CodeGenFunction::OpaqueValueMapping OpaqueMap(
      CGF,
      cast<OpaqueValueExpr>(
          CGF.getContext().getAsVariableArrayType(PrivateType)->getSizeExpr()),
      RValue::get(Size));
  CGF.EmitVariablyModifiedType(PrivateType);
}

/// Emit initialization of the N-th private reduction copy from the shared
/// item. DefaultInit may perform caller-specific initialization first; if it
/// returns false the private VarDecl's own initializer is used as fallback.
void ReductionCodeGen::emitInitialization(
    CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
    llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
  assert(SharedAddresses.size() > N && "No variable was generated");
  auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
  QualType PrivateType = PrivateVD->getType();
  // Normalize both addresses to the memory representation of their types
  // before choosing an initialization strategy.
  PrivateAddr = CGF.Builder.CreateElementBitCast(
      PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
  QualType SharedType = SharedAddresses[N].first.getType();
  SharedLVal = CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(),
                                       CGF.ConvertTypeForMem(SharedType)),
      SharedType, SharedAddresses[N].first.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
  if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
    // Arrays: element-by-element init.
    emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
  } else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
    // Scalars with a UDR initializer (or no private init of their own).
    emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
                                     PrivateAddr, SharedLVal.getAddress(),
                                     SharedLVal.getType());
  } else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
             !CGF.isTrivialInitializer(PrivateVD->getInit())) {
    // Fallback: run the private copy's own non-trivial initializer.
    CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
                         PrivateVD->getType().getQualifiers(),
                         /*IsInitializer=*/false);
  }
}

/// Return true iff the N-th private copy's type requires destruction.
bool ReductionCodeGen::needCleanups(unsigned N) {
  auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  return DTorKind != QualType::DK_none;
}

/// Push a destructor cleanup for the N-th private copy if its type needs one.
void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
                                    Address PrivateAddr) {
  auto *PrivateVD =
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
  QualType PrivateType = PrivateVD->getType();
  QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
  if (needCleanups(N)) {
    PrivateAddr = CGF.Builder.CreateElementBitCast(
        PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
    CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
  }
}

/// Dereference through any chain of pointers/references in BaseTy until the
/// pointee matches ElTy, then return an lvalue cast to ElTy's memory type.
static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          LValue BaseLV) {
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    if (auto *PtrTy = BaseTy->getAs<PointerType>())
      BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
    else {
      LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
      BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
    }
    BaseTy = BaseTy->getPointeeType();
  }
  return CGF.MakeAddrLValue(
      CGF.Builder.CreateElementBitCast(BaseLV.getAddress(),
                                       CGF.ConvertTypeForMem(ElTy)),
      BaseLV.getType(), BaseLV.getBaseInfo(),
      CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}

/// Build the inverse of loadToBegin: wrap Addr in a chain of temporaries so
/// it can be used where a value of (possibly multi-level pointer/reference)
/// type BaseTy is expected. Returns the outermost temporary, or Addr itself
/// when BaseTy needs no indirection.
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
                          llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
                          llvm::Value *Addr) {
  Address Tmp = Address::invalid();
  Address TopTmp = Address::invalid();
  Address MostTopTmp = Address::invalid();
  BaseTy = BaseTy.getNonReferenceType();
  while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
         !CGF.getContext().hasSameType(BaseTy, ElTy)) {
    // One temporary per level of indirection; each stores the next one down.
    Tmp = CGF.CreateMemTemp(BaseTy);
    if (TopTmp.isValid())
      CGF.Builder.CreateStore(Tmp.getPointer(), TopTmp);
    else
      MostTopTmp = Tmp;
    TopTmp = Tmp;
    BaseTy = BaseTy->getPointeeType();
  }
  llvm::Type *Ty = BaseLVType;
  if (Tmp.isValid())
    Ty = Tmp.getElementType();
  Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
  if (Tmp.isValid()) {
    CGF.Builder.CreateStore(Addr, Tmp);
    return MostTopTmp;
  }
  return Address(Addr, BaseLVAlignment);
}

/// Adjust the private address of reduction item N so that indexing it mirrors
/// indexing the original base variable (for array sections/subscripts, the
/// private copy starts at the section's lower bound, not at the base).
Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                                               Address PrivateAddr) {
  // DE is only read when OrigVD was found, and both are set together below.
  const DeclRefExpr *DE;
  const VarDecl *OrigVD = nullptr;
  if (auto *OASE =
          dyn_cast<OMPArraySectionExpr>(ClausesData[N].Ref)) {
    // Strip nested sections/subscripts down to the underlying DeclRefExpr.
    auto *Base = OASE->getBase()->IgnoreParenImpCasts();
    while (auto *TempOASE = dyn_cast<OMPArraySectionExpr>(Base))
      Base = TempOASE->getBase()->IgnoreParenImpCasts();
    while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    DE = cast<DeclRefExpr>(Base);
    OrigVD = cast<VarDecl>(DE->getDecl());
  } else if (auto *ASE = dyn_cast<ArraySubscriptExpr>(ClausesData[N].Ref)) {
    auto *Base = ASE->getBase()->IgnoreParenImpCasts();
    while (auto *TempASE = dyn_cast<ArraySubscriptExpr>(Base))
      Base = TempASE->getBase()->IgnoreParenImpCasts();
    DE = cast<DeclRefExpr>(Base);
    OrigVD = cast<VarDecl>(DE->getDecl());
  }
  if (OrigVD) {
    BaseDecls.emplace_back(OrigVD);
    auto OriginalBaseLValue = CGF.EmitLValue(DE);
    LValue BaseLValue =
        loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
                    OriginalBaseLValue);
    // Offset (in elements) of the base from the section's begin; applying it
    // to the private copy yields a pointer that indexes like the original.
    llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
        BaseLValue.getPointer(), SharedAddresses[N].first.getPointer());
    llvm::Value *PrivatePointer =
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            PrivateAddr.getPointer(),
            SharedAddresses[N].first.getAddress().getType());
    llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
    return castToBase(CGF, OrigVD->getType(),
                      SharedAddresses[N].first.getType(),
                      OriginalBaseLValue.getAddress().getType(),
                      OriginalBaseLValue.getAlignment(), Ptr);
  }
  // Plain variable reference: no adjustment needed.
  BaseDecls.emplace_back(
      cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
  return PrivateAddr;
}

/// True iff item N is a user-defined reduction with an explicit initializer.
bool ReductionCodeGen::usesReductionInitializer(unsigned N) const {
  auto *DRD = getReductionInit(ClausesData[N].ReductionOp);
  return DRD && DRD->getInitializer();
}

// Outlined regions receive the thread id as an int32* parameter; load
// through it to get the thread id lvalue.
LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
  return CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(getThreadIDVariable()),
      getThreadIDVariable()->getType()->castAs<PointerType>());
}

void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
  if (!CGF.HaveInsertPoint())
    return;
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  CGF.EHStack.pushTerminate();
  CodeGen(CGF);
  CGF.EHStack.popTerminate();
}

// Task regions receive the thread id by value (kmp_int32), not by pointer.
LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
    CodeGenFunction &CGF) {
  return CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(getThreadIDVariable()),
                            getThreadIDVariable()->getType(),
                            AlignmentSource::Decl);
}

/// Set up the libomp ABI types (ident_t, kmp_critical_name) and load any
/// offload entry metadata from device bitcode.
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
    : CGM(CGM), OffloadEntriesInfoManager(CGM) {
  // Must match the ident_t layout in the OpenMP runtime's kmp.h.
  IdentTy = llvm::StructType::create(
      "ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
      CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
      CGM.Int8PtrTy /* psource */);
  KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);

  loadOffloadInfoMetadata();
}

void CGOpenMPRuntime::clear() {
  InternalVars.clear();
}

/// Emit the outlined ".omp_combiner." or ".omp_initializer." function for a
/// user-defined reduction.
/// \param CombinerInitializer The combiner/initializer expression to emit, or
/// null to emit only Out's default initialization.
/// \param In Variable bound to the first (in/orig) parameter.
/// \param Out Variable bound to the second (out/priv) parameter.
/// \param IsCombiner Selects the function name and enables Out's default init
/// when false (initializer case).
static llvm::Function *
emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
                          const Expr *CombinerInitializer, const VarDecl *In,
                          const VarDecl *Out, bool IsCombiner) {
  // void .omp_combiner.(Ty *in, Ty *out);
  auto &C = CGM.getContext();
  QualType PtrTy = C.getPointerType(Ty).withRestrict();
  FunctionArgList Args;
  ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
                               /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl OmpInParm(C,
                              /*DC=*/nullptr, In->getLocation(),
                              /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
  Args.push_back(&OmpOutParm);
  Args.push_back(&OmpInParm);
  auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  auto *Fn = llvm::Function::Create(
      FnTy, llvm::GlobalValue::InternalLinkage,
      IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
  // Force inlining: these helpers are tiny and called from hot reduction code.
  Fn->removeFnAttr(llvm::Attribute::NoInline);
  Fn->removeFnAttr(llvm::Attribute::OptimizeNone);
  Fn->addFnAttr(llvm::Attribute::AlwaysInline);
  CodeGenFunction CGF(CGM);
  // Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
  // Map "T omp_out;" variable to "*omp_out_parm" value in all expressions.
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, In->getLocation(),
                    Out->getLocation());
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
  Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() -> Address {
    return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
  Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() -> Address {
    return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
        .getAddress();
  });
  (void)Scope.Privatize();
  // Initializer variant: default-initialize omp_priv before (or instead of)
  // running the user-provided initializer expression.
  if (!IsCombiner && Out->hasInit() &&
      !CGF.isTrivialInitializer(Out->getInit())) {
    CGF.EmitAnyExprToMem(Out->getInit(), CGF.GetAddrOfLocalVar(Out),
                         Out->getType().getQualifiers(),
                         /*IsInitializer=*/true);
  }
  if (CombinerInitializer)
    CGF.EmitIgnoredExpr(CombinerInitializer);
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}

/// Emit (once per decl) the combiner and optional initializer functions for a
/// user-defined reduction, caching them in UDRMap. When emitted on behalf of
/// a function (CGF non-null), also record the decl in FunctionUDRMap so
/// functionFinished() can drop the cache entries.
void CGOpenMPRuntime::emitUserDefinedReduction(
    CodeGenFunction *CGF, const OMPDeclareReductionDecl *D) {
  if (UDRMap.count(D) > 0)
    return;
  auto &C = CGM.getContext();
  // Lazily intern the fixed identifiers used to look up the UDR's variables.
  if (!In || !Out) {
    In = &C.Idents.get("omp_in");
    Out = &C.Idents.get("omp_out");
  }
  llvm::Function *Combiner = emitCombinerOrInitializer(
      CGM, D->getType(), D->getCombiner(), cast<VarDecl>(D->lookup(In).front()),
      cast<VarDecl>(D->lookup(Out).front()),
      /*IsCombiner=*/true);
  llvm::Function *Initializer = nullptr;
  if (auto *Init = D->getInitializer()) {
    if (!Priv || !Orig) {
      Priv = &C.Idents.get("omp_priv");
      Orig = &C.Idents.get("omp_orig");
    }
    Initializer = emitCombinerOrInitializer(
        CGM, D->getType(),
        // Direct-init ("omp_priv(...)") is handled via omp_priv's own
        // initializer inside the helper, so pass no expression then.
        D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
                                                                     : nullptr,
        cast<VarDecl>(D->lookup(Orig).front()),
        cast<VarDecl>(D->lookup(Priv).front()),
        /*IsCombiner=*/false);
  }
  UDRMap.insert(std::make_pair(D, std::make_pair(Combiner, Initializer)));
  if (CGF) {
    auto &Decls = FunctionUDRMap.FindAndConstruct(CGF->CurFn);
    Decls.second.push_back(D);
  }
}

/// Return the cached (combiner, initializer) pair for D, emitting it first if
/// necessary. The initializer may be null.
std::pair<llvm::Function *, llvm::Function *>
CGOpenMPRuntime::getUserDefinedReduction(const OMPDeclareReductionDecl *D) {
  auto I = UDRMap.find(D);
  if (I != UDRMap.end())
    return I->second;
  emitUserDefinedReduction(/*CGF=*/nullptr, D);
  return UDRMap.lookup(D);
}

// Layout information for ident_t.
static CharUnits getIdentAlign(CodeGenModule &CGM) {
  return CGM.getPointerAlign();
}
static CharUnits getIdentSize(CodeGenModule &CGM) {
  // 4 x i32 fields followed by the psource pointer.
  assert((4 * CGM.getPointerSize()).isMultipleOf(CGM.getPointerAlign()));
  return CharUnits::fromQuantity(16) + CGM.getPointerSize();
}
static CharUnits getOffsetOfIdentField(IdentFieldIndex Field) {
  // All the fields except the last are i32, so this works beautifully.
  return unsigned(Field) * CharUnits::fromQuantity(4);
}

/// GEP to the given ident_t field of Addr.
static Address createIdentFieldGEP(CodeGenFunction &CGF, Address Addr,
                                   IdentFieldIndex Field,
                                   const llvm::Twine &Name = "") {
  auto Offset = getOffsetOfIdentField(Field);
  return CGF.Builder.CreateStructGEP(Addr, Field, Offset, Name);
}

/// Outline the body of a 'parallel' or 'teams' region into a function taking
/// the kmp_int32* thread id as its first parameter. Cancellation support is
/// enabled for the directive kinds that can carry a 'cancel' construct.
static llvm::Value *emitParallelOrTeamsOutlinedFunction(
    CodeGenModule &CGM, const OMPExecutableDirective &D, const CapturedStmt *CS,
    const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
    const StringRef OutlinedHelperName, const RegionCodeGenTy &CodeGen) {
  assert(ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 *");
  CodeGenFunction CGF(CGM, true);
  bool HasCancel = false;
  // Each parallel-containing directive kind exposes hasCancel() separately.
  if (auto *OPD = dyn_cast<OMPParallelDirective>(&D))
    HasCancel = OPD->hasCancel();
  else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&D))
    HasCancel = OPSD->hasCancel();
  else if (auto *OPFD = dyn_cast<OMPParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (auto *OPFD = dyn_cast<OMPTargetParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (auto *OPFD = dyn_cast<OMPDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (auto *OPFD = dyn_cast<OMPTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  else if (auto *OPFD =
               dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&D))
    HasCancel = OPFD->hasCancel();
  CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
                                    HasCancel, OutlinedHelperName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(*CS);
}

llvm::Value *CGOpenMPRuntime::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
  return emitParallelOrTeamsOutlinedFunction(
      CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}

llvm::Value *CGOpenMPRuntime::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
  return emitParallelOrTeamsOutlinedFunction(
      CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
}

/// Outline a 'task'/'taskloop' body. For untied tasks, sets up an action that
/// re-enqueues the task (__kmpc_omp_task) at untied switch points and reports
/// the resulting number of task parts via NumberOfParts.
llvm::Value *CGOpenMPRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  auto &&UntiedCodeGen = [this, &D, TaskTVar](CodeGenFunction &CGF,
                                              PrePostActionTy &) {
    auto *ThreadID = getThreadID(CGF, D.getLocStart());
    auto *UpLoc = emitUpdateLocation(CGF, D.getLocStart());
    llvm::Value *TaskArgs[] = {
        UpLoc, ThreadID,
        CGF.EmitLoadOfPointerLValue(CGF.GetAddrOfLocalVar(TaskTVar),
                                    TaskTVar->getType()->castAs<PointerType>())
            .getPointer()};
    CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task), TaskArgs);
  };
  CGOpenMPTaskOutlinedRegionInfo::UntiedTaskActionTy Action(Tied, PartIDVar,
                                                            UntiedCodeGen);
  CodeGen.setAction(Action);
  assert(!ThreadIDVar->getType()->isPointerType() &&
         "thread id variable must be of type kmp_int32 for tasks");
  const OpenMPDirectiveKind Region =
      isOpenMPTaskLoopDirective(D.getDirectiveKind()) ?
          OMPD_taskloop : OMPD_task;
  auto *CS = D.getCapturedStmt(Region);
  auto *TD = dyn_cast<OMPTaskDirective>(&D);
  CodeGenFunction CGF(CGM, true);
  CGOpenMPTaskOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen,
                                        InnermostKind,
                                        TD ? TD->hasCancel() : false, Action);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  auto *Res = CGF.GenerateCapturedStmtFunction(*CS);
  if (!Tied)
    NumberOfParts = Action.getNumberOfParts();
  return Res;
}

/// Return (creating on first use, per Flags value) a module-level constant
/// ident_t with the default ";unknown;unknown;0;0;;" source string.
Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
  CharUnits Align = getIdentAlign(CGM);
  llvm::Value *Entry = OpenMPDefaultLocMap.lookup(Flags);
  if (!Entry) {
    if (!DefaultOpenMPPSource) {
      // Initialize default location for psource field of ident_t structure of
      // all ident_t objects. Format is ";file;function;line;column;;".
      // Taken from
      // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp_str.c
      DefaultOpenMPPSource =
          CGM.GetAddrOfConstantCString(";unknown;unknown;0;0;;").getPointer();
      DefaultOpenMPPSource =
          llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
    }

    // ident_t = { reserved_1, flags, reserved_2, reserved_3, psource }.
    ConstantInitBuilder builder(CGM);
    auto fields = builder.beginStruct(IdentTy);
    fields.addInt(CGM.Int32Ty, 0);
    fields.addInt(CGM.Int32Ty, Flags);
    fields.addInt(CGM.Int32Ty, 0);
    fields.addInt(CGM.Int32Ty, 0);
    fields.add(DefaultOpenMPPSource);
    auto DefaultOpenMPLocation =
        fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
                                     llvm::GlobalValue::PrivateLinkage);
    DefaultOpenMPLocation->setUnnamedAddr(
        llvm::GlobalValue::UnnamedAddr::Global);

    OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
  }
  return Address(Entry, Align);
}

/// Build (or reuse) a function-local ident_t describing Loc and return a
/// pointer to it, for passing as the 'loc' argument of __kmpc_* calls.
/// Without debug info (or with an invalid Loc) the shared default location
/// global is returned instead.
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned Flags) {
  Flags |= OMP_IDENT_KMPC;
  // If no debug info is generated - return global default location.
  if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
      Loc.isInvalid())
    return getOrCreateDefaultLocation(Flags).getPointer();

  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  Address LocValue = Address::invalid();
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end())
    LocValue = Address(I->second.DebugLoc, getIdentAlign(CGF.CGM));

  // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
  // GetOpenMPThreadID was called before this routine.
  if (!LocValue.isValid()) {
    // Generate "ident_t .kmpc_loc.addr;"
    Address AI = CGF.CreateTempAlloca(IdentTy, getIdentAlign(CGF.CGM),
                                      ".kmpc_loc.addr");
    auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
    Elem.second.DebugLoc = AI.getPointer();
    LocValue = AI;

    // Seed the alloca from the default location in the entry block so only
    // psource needs updating per call site.
    CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
    CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
    CGF.Builder.CreateMemCpy(LocValue, getOrCreateDefaultLocation(Flags),
                             CGM.getSize(getIdentSize(CGF.CGM)));
  }

  // char **psource = &.kmpc_loc_<flags>.addr.psource;
  Address PSource = createIdentFieldGEP(CGF, LocValue, IdentField_PSource);

  // Cache the ";file;function;line;column;;" string per raw source location.
  auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
  if (OMPDebugLoc == nullptr) {
    SmallString<128> Buffer2;
    llvm::raw_svector_ostream OS2(Buffer2);
    // Build debug location
    PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
    OS2 << ";" << PLoc.getFilename() << ";";
    if (const FunctionDecl *FD =
            dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl)) {
      OS2 << FD->getQualifiedNameAsString();
    }
    OS2 << ";" << PLoc.getLine() << ";" << PLoc.getColumn() << ";;";
    OMPDebugLoc = CGF.Builder.CreateGlobalStringPtr(OS2.str());
    OpenMPDebugLocMap[Loc.getRawEncoding()] = OMPDebugLoc;
  }
  // *psource = ";<File>;<Function>;<Line>;<Column>;;";
  CGF.Builder.CreateStore(OMPDebugLoc, PSource);

  // Our callers always pass this to a runtime function, so for
  // convenience, go ahead and return a naked pointer.
  return LocValue.getPointer();
}

/// Return the current OpenMP global thread id for CGF's function, caching it
/// per function. In outlined regions the id comes from the region's thread id
/// variable; otherwise __kmpc_global_thread_num is called in the entry block.
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
                                          SourceLocation Loc) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");

  llvm::Value *ThreadID = nullptr;
  // Check whether we've already cached a load of the thread id in this
  // function.
  auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
  if (I != OpenMPLocThreadIDMap.end()) {
    ThreadID = I->second.ThreadID;
    if (ThreadID != nullptr)
      return ThreadID;
  }
  // If exceptions are enabled, do not use parameter to avoid possible crash.
  if (!CGF.EHStack.requiresLandingPad() || !CGF.getLangOpts().Exceptions ||
      !CGF.getLangOpts().CXXExceptions ||
      CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
    if (auto *OMPRegionInfo =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
      if (OMPRegionInfo->getThreadIDVariable()) {
        // Check if this an outlined function with thread id passed as argument.
        auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
        ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
        // If value loaded in entry block, cache it and use it everywhere in
        // function.
        if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
          auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
          Elem.second.ThreadID = ThreadID;
        }
        return ThreadID;
      }
    }
  }

  // This is not an outlined function region - need to call __kmpc_int32
  // kmpc_global_thread_num(ident_t *loc).
  // Generate thread id value and cache this value for use across the
  // function.
  // Emit the call in the entry block so the cached value dominates all uses.
  CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
  CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
  auto *Call = CGF.Builder.CreateCall(
      createRuntimeFunction(OMPRTL__kmpc_global_thread_num),
      emitUpdateLocation(CGF, Loc));
  Call->setCallingConv(CGF.getRuntimeCC());
  auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
  Elem.second.ThreadID = Call;
  return Call;
}

/// Drop per-function caches (thread id/location, and UDRs emitted on behalf
/// of this function) once CGF's function is fully emitted.
void CGOpenMPRuntime::functionFinished(CodeGenFunction &CGF) {
  assert(CGF.CurFn && "No function in current CodeGenFunction.");
  // NOTE(review): count()+erase() does two lookups each; a plain erase(key)
  // would suffice — left as-is here.
  if (OpenMPLocThreadIDMap.count(CGF.CurFn))
    OpenMPLocThreadIDMap.erase(CGF.CurFn);
  if (FunctionUDRMap.count(CGF.CurFn) > 0) {
    for(auto *D : FunctionUDRMap[CGF.CurFn]) {
      UDRMap.erase(D);
    }
    FunctionUDRMap.erase(CGF.CurFn);
  }
}

llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
  // NOTE(review): dead empty branch — IdentTy is created eagerly in the
  // CGOpenMPRuntime constructor, so no lazy init is needed here.
  if (!IdentTy) {
  }
  return llvm::PointerType::getUnqual(IdentTy);
}

llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
  if (!Kmpc_MicroTy) {
    // Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
    llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
                                 llvm::PointerType::getUnqual(CGM.Int32Ty)};
    // Variadic: the captured-variable arguments follow the two tid pointers.
    Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
  }
  return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}

/// \brief Declare (on first use) and return the OpenMP runtime entry point
/// identified by \a Function. Each case builds the llvm::FunctionType spelled
/// out in its leading comment and asks CGM to create the declaration.
llvm::Constant *
CGOpenMPRuntime::createRuntimeFunction(unsigned Function) {
  llvm::Constant *RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunction>(Function)) {
  case OMPRTL__kmpc_fork_call: {
    // Build void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
    // microtask, ...);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                getKmpc_MicroPointerTy()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
    break;
  }
  case OMPRTL__kmpc_global_thread_num: {
    // Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
    break;
  }
  case OMPRTL__kmpc_threadprivate_cached: {
    // Build void *__kmpc_threadprivate_cached(ident_t *loc,
    // kmp_int32 global_tid, void *data, size_t size, void ***cache);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy, CGM.SizeTy,
                                CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
    break;
  }
  case OMPRTL__kmpc_critical: {
    // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *crit);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
    break;
  }
  case OMPRTL__kmpc_critical_with_hint: {
    // Build void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *crit, uintptr_t hint);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                llvm::PointerType::getUnqual(KmpCriticalNameTy),
                                CGM.IntPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical_with_hint");
    break;
  }
  case OMPRTL__kmpc_threadprivate_register: {
    // Build void __kmpc_threadprivate_register(ident_t *, void *data,
    // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
    // typedef void *(*kmpc_ctor)(void *);
    auto KmpcCtorTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
                                /*isVarArg*/ false)->getPointerTo();
    // typedef void *(*kmpc_cctor)(void *, void *);
    llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto KmpcCopyCtorTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
                                /*isVarArg*/ false)->getPointerTo();
    // typedef void (*kmpc_dtor)(void *);
    auto KmpcDtorTy =
        llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
            ->getPointerTo();
    llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
                              KmpcCopyCtorTy, KmpcDtorTy};
    auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
                                        /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
    break;
  }
  case OMPRTL__kmpc_end_critical: {
    // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *crit);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
    break;
  }
  case OMPRTL__kmpc_cancel_barrier: {
    // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
    break;
  }
  case OMPRTL__kmpc_barrier: {
    // Build void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_barrier");
    break;
  }
  case OMPRTL__kmpc_for_static_fini: {
    // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
    break;
  }
  case OMPRTL__kmpc_push_num_threads: {
    // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_threads)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
    break;
  }
  case OMPRTL__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL__kmpc_flush: {
    // Build void __kmpc_flush(ident_t *loc);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
    break;
  }
  case OMPRTL__kmpc_master: {
    // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
    break;
  }
  case OMPRTL__kmpc_end_master: {
    // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
    break;
  }
  case OMPRTL__kmpc_omp_taskyield: {
    // Build kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
    // int end_part);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_taskyield");
    break;
  }
  case OMPRTL__kmpc_single: {
    // Build kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_single");
    break;
  }
  case OMPRTL__kmpc_end_single: {
    // Build void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_single");
    break;
  }
  case OMPRTL__kmpc_omp_task_alloc: {
    // Build kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
    // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
    // kmp_routine_entry_t *task_entry);
    assert(KmpRoutineEntryPtrTy != nullptr &&
           "Type kmp_routine_entry_t must be created.");
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
                                CGM.SizeTy, CGM.SizeTy, KmpRoutineEntryPtrTy};
    // Return void * and then cast to particular kmp_task_t type.
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_alloc");
    break;
  }
  case OMPRTL__kmpc_omp_task: {
    // Build kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
    // *new_task);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task");
    break;
  }
  case OMPRTL__kmpc_copyprivate: {
    // Build void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
    // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
    // kmp_int32 didit);
    llvm::Type *CpyTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *CpyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, CpyTypeParams, /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.SizeTy,
                                CGM.VoidPtrTy, CpyFnTy->getPointerTo(),
                                CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_copyprivate");
    break;
  }
  case OMPRTL__kmpc_reduce: {
    // Build kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
    // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
    llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
                                               /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
        CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce");
    break;
  }
  case OMPRTL__kmpc_reduce_nowait: {
    // Build kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
    // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
    // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
    // *lck);
    llvm::Type *ReduceTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
    auto *ReduceFnTy = llvm::FunctionType::get(CGM.VoidTy, ReduceTypeParams,
                                               /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty, CGM.SizeTy,
        CGM.VoidPtrTy, ReduceFnTy->getPointerTo(),
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_reduce_nowait");
    break;
  }
  case OMPRTL__kmpc_end_reduce: {
    // Build void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *lck);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce");
    break;
  }
  case OMPRTL__kmpc_end_reduce_nowait: {
    // Build __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
    // kmp_critical_name *lck);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty,
        llvm::PointerType::getUnqual(KmpCriticalNameTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_reduce_nowait");
    break;
  }
  case OMPRTL__kmpc_omp_task_begin_if0: {
    // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
    // *new_task);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_begin_if0");
    break;
  }
  case OMPRTL__kmpc_omp_task_complete_if0: {
    // Build void __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t
    // *new_task);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy,
                                      /*Name=*/"__kmpc_omp_task_complete_if0");
    break;
  }
  case OMPRTL__kmpc_ordered: {
    // Build void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_ordered");
    break;
  }
  case OMPRTL__kmpc_end_ordered: {
    // Build void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_ordered");
    break;
  }
  case OMPRTL__kmpc_omp_taskwait: {
    // Build kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_omp_taskwait");
    break;
  }
  case OMPRTL__kmpc_taskgroup: {
    // Build void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_taskgroup");
    break;
  }
  case OMPRTL__kmpc_end_taskgroup: {
    // Build void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_taskgroup");
    break;
  }
  case OMPRTL__kmpc_push_proc_bind: {
    // Build void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
    // int proc_bind)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_proc_bind");
    break;
  }
  case OMPRTL__kmpc_omp_task_with_deps: {
    // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
    // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
    llvm::Type *TypeParams[] = {
        getIdentTyPointerTy(), CGM.Int32Ty, CGM.VoidPtrTy, CGM.Int32Ty,
        CGM.VoidPtrTy, CGM.Int32Ty, CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_task_with_deps");
    break;
  }
  case OMPRTL__kmpc_omp_wait_deps: {
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    // kmp_depend_info_t *noalias_dep_list);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int32Ty, CGM.VoidPtrTy,
                                CGM.Int32Ty, CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_omp_wait_deps");
    break;
  }
  case OMPRTL__kmpc_cancellationpoint: {
    // Build kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
    // global_tid, kmp_int32 cncl_kind)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancellationpoint");
    break;
  }
  case OMPRTL__kmpc_cancel: {
    // Build kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
    // kmp_int32 cncl_kind)
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.IntTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_cancel");
    break;
  }
  case OMPRTL__kmpc_push_num_teams: {
    // Build void kmpc_push_num_teams (ident_t loc, kmp_int32 global_tid,
    // kmp_int32 num_teams, kmp_int32 num_threads)
    // NOTE(review): the comment says 'void' but the declared return type below
    // is kmp_int32 — confirm against the runtime's kmp.h declaration.
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty, CGM.Int32Ty,
                                CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_teams");
    break;
  }
  case OMPRTL__kmpc_fork_teams: {
    // Build void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro
    // microtask, ...);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                getKmpc_MicroPointerTy()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_teams");
    break;
  }
  case OMPRTL__kmpc_taskloop: {
    // Build void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
    // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
    // sched, kmp_uint64 grainsize, void *task_dup);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.IntTy,
                                CGM.VoidPtrTy,
                                CGM.IntTy,
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int64Ty,
                                CGM.IntTy,
                                CGM.IntTy,
                                CGM.Int64Ty,
                                CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_taskloop");
    break;
  }
  case OMPRTL__kmpc_doacross_init: {
    // Build void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid, kmp_int32
    // num_dims, struct kmp_dim *dims);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(),
                                CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_init");
    break;
  }
  case OMPRTL__kmpc_doacross_fini: {
    // Build void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_fini");
    break;
  }
  case OMPRTL__kmpc_doacross_post: {
    // Build void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid, kmp_int64
    // *vec);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_post");
    break;
  }
  case OMPRTL__kmpc_doacross_wait: {
    // Build void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid, kmp_int64
    // *vec);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_doacross_wait");
    break;
  }
  case OMPRTL__kmpc_task_reduction_init: {
    // Build void *__kmpc_task_reduction_init(int gtid, int num_data, void
    // *data);
    llvm::Type *TypeParams[] = {CGM.IntTy, CGM.IntTy, CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn =
        CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_task_reduction_init");
    break;
  }
  case OMPRTL__kmpc_task_reduction_get_th_data: {
    // Build void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
    // *d);
    llvm::Type *TypeParams[] = {CGM.IntTy, CGM.VoidPtrTy, CGM.VoidPtrTy};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_task_reduction_get_th_data");
    break;
  }
  case OMPRTL__tgt_target: {
    // Build int32_t __tgt_target(int64_t device_id, void *host_ptr, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
    break;
  }
  case OMPRTL__tgt_target_nowait: {
    // Build int32_t __tgt_target_nowait(int64_t device_id, void *host_ptr,
    // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
    // int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_nowait");
    break;
  }
  case OMPRTL__tgt_target_teams: {
    // Build int32_t __tgt_target_teams(int64_t device_id, void *host_ptr,
    // int32_t arg_num, void** args_base, void **args, size_t *arg_sizes,
    // int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int32Ty,
                                CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams");
    break;
  }
  case OMPRTL__tgt_target_teams_nowait: {
    // Build int32_t __tgt_target_teams_nowait(int64_t device_id, void
    // *host_ptr, int32_t arg_num, void** args_base, void **args, size_t
    // *arg_sizes, int64_t *arg_types, int32_t num_teams, int32_t thread_limit);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.VoidPtrTy,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo(),
                                CGM.Int32Ty,
                                CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_teams_nowait");
    break;
  }
  case OMPRTL__tgt_register_lib: {
    // Build void __tgt_register_lib(__tgt_bin_desc *desc);
    // NOTE(review): the comment says 'void' but the declared return type below
    // is Int32Ty — confirm against the libomptarget interface.
    QualType ParamTy =
        CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
    llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
    break;
  }
  case OMPRTL__tgt_unregister_lib: {
    // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
    QualType ParamTy =
        CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
    llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
    break;
  }
  case OMPRTL__tgt_target_data_begin: {
    // Build void __tgt_target_data_begin(int64_t device_id, int32_t arg_num,
    // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin");
    break;
  }
  case OMPRTL__tgt_target_data_begin_nowait: {
    // Build void __tgt_target_data_begin_nowait(int64_t device_id, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_begin_nowait");
    break;
  }
  case OMPRTL__tgt_target_data_end: {
    // Build void __tgt_target_data_end(int64_t device_id, int32_t arg_num,
    // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end");
    break;
  }
  case OMPRTL__tgt_target_data_end_nowait: {
    // Build void __tgt_target_data_end_nowait(int64_t device_id, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_end_nowait");
    break;
  }
  case OMPRTL__tgt_target_data_update: {
    // Build void __tgt_target_data_update(int64_t device_id, int32_t arg_num,
    // void** args_base, void **args, size_t *arg_sizes, int64_t *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update");
    break;
  }
  case OMPRTL__tgt_target_data_update_nowait: {
    // Build void __tgt_target_data_update_nowait(int64_t device_id, int32_t
    // arg_num, void** args_base, void **args, size_t *arg_sizes, int64_t
    // *arg_types);
    llvm::Type *TypeParams[] = {CGM.Int64Ty,
                                CGM.Int32Ty,
                                CGM.VoidPtrPtrTy,
                                CGM.VoidPtrPtrTy,
                                CGM.SizeTy->getPointerTo(),
                                CGM.Int64Ty->getPointerTo()};
    auto *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target_data_update_nowait");
    break;
  }
  }
  // Every enumerator must be handled above; a null RTLFn means a new entry
  // point was added to the enum without a corresponding case.
  assert(RTLFn && "Unable to find OpenMP runtime function");
  return RTLFn;
}

/// \brief Declare the __kmpc_for_static_init_{4,4u,8,8u} variant selected by
/// the induction-variable size (32/64 bits) and signedness.
llvm::Constant *CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize,
                                                             bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  auto Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
                                       : "__kmpc_for_static_init_4u")
                           : (IVSigned ? "__kmpc_for_static_init_8"
                                       : "__kmpc_for_static_init_8u");
  auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  auto PtrTy = llvm::PointerType::getUnqual(ITy);
  llvm::Type *TypeParams[] = {
      getIdentTyPointerTy(),                     // loc
      CGM.Int32Ty,                               // tid
      CGM.Int32Ty,                               // schedtype
      llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
      PtrTy,                                     // p_lower
      PtrTy,                                     // p_upper
      PtrTy,                                     // p_stride
      ITy,                                       // incr
      ITy                                        // chunk
  };
  llvm::FunctionType *FnTy =
      llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

/// \brief Declare the __kmpc_dispatch_init_{4,4u,8,8u} variant selected by
/// the induction-variable size and signedness.
llvm::Constant *CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize,
                                                            bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  auto Name =
      IVSize == 32
          ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
          : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
  auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
                               CGM.Int32Ty,           // tid
                               CGM.Int32Ty,           // schedtype
                               ITy,                   // lower
                               ITy,                   // upper
                               ITy,                   // stride
                               ITy                    // chunk
  };
  llvm::FunctionType *FnTy =
      llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

/// \brief Declare the __kmpc_dispatch_fini_{4,4u,8,8u} variant selected by
/// the induction-variable size and signedness.
llvm::Constant *CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize,
                                                            bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  auto Name =
      IVSize == 32
          ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
          : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
  llvm::Type *TypeParams[] = {
      getIdentTyPointerTy(), // loc
      CGM.Int32Ty,           // tid
  };
  llvm::FunctionType *FnTy =
      llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

/// \brief Declare the __kmpc_dispatch_next_{4,4u,8,8u} variant selected by
/// the induction-variable size and signedness.
llvm::Constant *CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize,
                                                            bool IVSigned) {
  assert((IVSize == 32 || IVSize == 64) &&
         "IV size is not compatible with the omp runtime");
  auto Name =
      IVSize == 32
          ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
          : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
  auto ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
  auto PtrTy = llvm::PointerType::getUnqual(ITy);
  llvm::Type *TypeParams[] = {
      getIdentTyPointerTy(),                     // loc
      CGM.Int32Ty,                               // tid
      llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
      PtrTy,                                     // p_lower
      PtrTy,                                     // p_upper
      PtrTy                                      // p_stride
  };
  llvm::FunctionType *FnTy =
      llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
  return CGM.CreateRuntimeFunction(FnTy, Name);
}

/// \brief Get or create the per-variable "<mangled-name>.cache." global used
/// by __kmpc_threadprivate_cached. Only valid when TLS is not being used.
llvm::Constant *
CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
  assert(!CGM.getLangOpts().OpenMPUseTLS ||
         !CGM.getContext().getTargetInfo().isTLSSupported());
  // Lookup the entry, lazily creating it if necessary.
  return getOrCreateInternalVariable(CGM.Int8PtrPtrTy,
                                     Twine(CGM.getMangledName(VD)) + ".cache.");
}

/// \brief Return the address of the current thread's copy of the
/// threadprivate variable \a VD. With native TLS the original address is
/// returned unchanged; otherwise __kmpc_threadprivate_cached is called.
Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                                const VarDecl *VD,
                                                Address VDAddr,
                                                SourceLocation Loc) {
  if (CGM.getLangOpts().OpenMPUseTLS &&
      CGM.getContext().getTargetInfo().isTLSSupported())
    return VDAddr;

  auto VarTy = VDAddr.getElementType();
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
                         CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
                                                       CGM.Int8PtrTy),
                         CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
                         getOrCreateThreadPrivateCache(VD)};
  return Address(CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args),
                 VDAddr.getAlignment());
}

/// \brief Register the ctor/cctor/dtor triple for a threadprivate variable
/// with the OpenMP runtime via __kmpc_threadprivate_register.
void CGOpenMPRuntime::emitThreadPrivateVarInit(
    CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor,
    llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
  // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
  // library.
2358 auto OMPLoc = emitUpdateLocation(CGF, Loc); 2359 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_global_thread_num), 2360 OMPLoc); 2361 // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor) 2362 // to register constructor/destructor for variable. 2363 llvm::Value *Args[] = {OMPLoc, 2364 CGF.Builder.CreatePointerCast(VDAddr.getPointer(), 2365 CGM.VoidPtrTy), 2366 Ctor, CopyCtor, Dtor}; 2367 CGF.EmitRuntimeCall( 2368 createRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args); 2369 } 2370 2371 llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition( 2372 const VarDecl *VD, Address VDAddr, SourceLocation Loc, 2373 bool PerformInit, CodeGenFunction *CGF) { 2374 if (CGM.getLangOpts().OpenMPUseTLS && 2375 CGM.getContext().getTargetInfo().isTLSSupported()) 2376 return nullptr; 2377 2378 VD = VD->getDefinition(CGM.getContext()); 2379 if (VD && ThreadPrivateWithDefinition.count(VD) == 0) { 2380 ThreadPrivateWithDefinition.insert(VD); 2381 QualType ASTTy = VD->getType(); 2382 2383 llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr; 2384 auto Init = VD->getAnyInitializer(); 2385 if (CGM.getLangOpts().CPlusPlus && PerformInit) { 2386 // Generate function that re-emits the declaration's initializer into the 2387 // threadprivate copy of the variable VD 2388 CodeGenFunction CtorCGF(CGM); 2389 FunctionArgList Args; 2390 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc, 2391 /*Id=*/nullptr, CGM.getContext().VoidPtrTy, 2392 ImplicitParamDecl::Other); 2393 Args.push_back(&Dst); 2394 2395 auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( 2396 CGM.getContext().VoidPtrTy, Args); 2397 auto FTy = CGM.getTypes().GetFunctionType(FI); 2398 auto Fn = CGM.CreateGlobalInitOrDestructFunction( 2399 FTy, ".__kmpc_global_ctor_.", FI, Loc); 2400 CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI, 2401 Args, Loc, Loc); 2402 auto ArgVal = CtorCGF.EmitLoadOfScalar( 2403 
CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false, 2404 CGM.getContext().VoidPtrTy, Dst.getLocation()); 2405 Address Arg = Address(ArgVal, VDAddr.getAlignment()); 2406 Arg = CtorCGF.Builder.CreateElementBitCast( 2407 Arg, CtorCGF.ConvertTypeForMem(ASTTy)); 2408 CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(), 2409 /*IsInitializer=*/true); 2410 ArgVal = CtorCGF.EmitLoadOfScalar( 2411 CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false, 2412 CGM.getContext().VoidPtrTy, Dst.getLocation()); 2413 CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue); 2414 CtorCGF.FinishFunction(); 2415 Ctor = Fn; 2416 } 2417 if (VD->getType().isDestructedType() != QualType::DK_none) { 2418 // Generate function that emits destructor call for the threadprivate copy 2419 // of the variable VD 2420 CodeGenFunction DtorCGF(CGM); 2421 FunctionArgList Args; 2422 ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc, 2423 /*Id=*/nullptr, CGM.getContext().VoidPtrTy, 2424 ImplicitParamDecl::Other); 2425 Args.push_back(&Dst); 2426 2427 auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( 2428 CGM.getContext().VoidTy, Args); 2429 auto FTy = CGM.getTypes().GetFunctionType(FI); 2430 auto Fn = CGM.CreateGlobalInitOrDestructFunction( 2431 FTy, ".__kmpc_global_dtor_.", FI, Loc); 2432 auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF); 2433 DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args, 2434 Loc, Loc); 2435 // Create a scope with an artificial location for the body of this function. 
2436 auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF); 2437 auto ArgVal = DtorCGF.EmitLoadOfScalar( 2438 DtorCGF.GetAddrOfLocalVar(&Dst), 2439 /*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation()); 2440 DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy, 2441 DtorCGF.getDestroyer(ASTTy.isDestructedType()), 2442 DtorCGF.needsEHCleanup(ASTTy.isDestructedType())); 2443 DtorCGF.FinishFunction(); 2444 Dtor = Fn; 2445 } 2446 // Do not emit init function if it is not required. 2447 if (!Ctor && !Dtor) 2448 return nullptr; 2449 2450 llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy}; 2451 auto CopyCtorTy = 2452 llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs, 2453 /*isVarArg=*/false)->getPointerTo(); 2454 // Copying constructor for the threadprivate variable. 2455 // Must be NULL - reserved by runtime, but currently it requires that this 2456 // parameter is always NULL. Otherwise it fires assertion. 2457 CopyCtor = llvm::Constant::getNullValue(CopyCtorTy); 2458 if (Ctor == nullptr) { 2459 auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy, 2460 /*isVarArg=*/false)->getPointerTo(); 2461 Ctor = llvm::Constant::getNullValue(CtorTy); 2462 } 2463 if (Dtor == nullptr) { 2464 auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, 2465 /*isVarArg=*/false)->getPointerTo(); 2466 Dtor = llvm::Constant::getNullValue(DtorTy); 2467 } 2468 if (!CGF) { 2469 auto InitFunctionTy = 2470 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false); 2471 auto InitFunction = CGM.CreateGlobalInitOrDestructFunction( 2472 InitFunctionTy, ".__omp_threadprivate_init_.", 2473 CGM.getTypes().arrangeNullaryFunction()); 2474 CodeGenFunction InitCGF(CGM); 2475 FunctionArgList ArgList; 2476 InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction, 2477 CGM.getTypes().arrangeNullaryFunction(), ArgList, 2478 Loc, Loc); 2479 emitThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); 2480 
InitCGF.FinishFunction(); 2481 return InitFunction; 2482 } 2483 emitThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc); 2484 } 2485 return nullptr; 2486 } 2487 2488 Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, 2489 QualType VarType, 2490 StringRef Name) { 2491 llvm::Twine VarName(Name, ".artificial."); 2492 llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType); 2493 llvm::Value *GAddr = getOrCreateInternalVariable(VarLVType, VarName); 2494 llvm::Value *Args[] = { 2495 emitUpdateLocation(CGF, SourceLocation()), 2496 getThreadID(CGF, SourceLocation()), 2497 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy), 2498 CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy, 2499 /*IsSigned=*/false), 2500 getOrCreateInternalVariable(CGM.VoidPtrPtrTy, VarName + ".cache.")}; 2501 return Address( 2502 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 2503 CGF.EmitRuntimeCall( 2504 createRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args), 2505 VarLVType->getPointerTo(/*AddrSpace=*/0)), 2506 CGM.getPointerAlign()); 2507 } 2508 2509 /// \brief Emits code for OpenMP 'if' clause using specified \a CodeGen 2510 /// function. Here is the logic: 2511 /// if (Cond) { 2512 /// ThenGen(); 2513 /// } else { 2514 /// ElseGen(); 2515 /// } 2516 void CGOpenMPRuntime::emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond, 2517 const RegionCodeGenTy &ThenGen, 2518 const RegionCodeGenTy &ElseGen) { 2519 CodeGenFunction::LexicalScope ConditionScope(CGF, Cond->getSourceRange()); 2520 2521 // If the condition constant folds and can be elided, try to avoid emitting 2522 // the condition and the dead arm of the if/else. 2523 bool CondConstant; 2524 if (CGF.ConstantFoldsToSimpleInteger(Cond, CondConstant)) { 2525 if (CondConstant) 2526 ThenGen(CGF); 2527 else 2528 ElseGen(CGF); 2529 return; 2530 } 2531 2532 // Otherwise, the condition did not fold, or we couldn't elide it. Just 2533 // emit the conditional branch. 
2534 auto ThenBlock = CGF.createBasicBlock("omp_if.then"); 2535 auto ElseBlock = CGF.createBasicBlock("omp_if.else"); 2536 auto ContBlock = CGF.createBasicBlock("omp_if.end"); 2537 CGF.EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, /*TrueCount=*/0); 2538 2539 // Emit the 'then' code. 2540 CGF.EmitBlock(ThenBlock); 2541 ThenGen(CGF); 2542 CGF.EmitBranch(ContBlock); 2543 // Emit the 'else' code if present. 2544 // There is no need to emit line number for unconditional branch. 2545 (void)ApplyDebugLocation::CreateEmpty(CGF); 2546 CGF.EmitBlock(ElseBlock); 2547 ElseGen(CGF); 2548 // There is no need to emit line number for unconditional branch. 2549 (void)ApplyDebugLocation::CreateEmpty(CGF); 2550 CGF.EmitBranch(ContBlock); 2551 // Emit the continuation block for code after the if. 2552 CGF.EmitBlock(ContBlock, /*IsFinished=*/true); 2553 } 2554 2555 void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, 2556 llvm::Value *OutlinedFn, 2557 ArrayRef<llvm::Value *> CapturedVars, 2558 const Expr *IfCond) { 2559 if (!CGF.HaveInsertPoint()) 2560 return; 2561 auto *RTLoc = emitUpdateLocation(CGF, Loc); 2562 auto &&ThenGen = [OutlinedFn, CapturedVars, RTLoc](CodeGenFunction &CGF, 2563 PrePostActionTy &) { 2564 // Build call __kmpc_fork_call(loc, n, microtask, var1, .., varn); 2565 auto &RT = CGF.CGM.getOpenMPRuntime(); 2566 llvm::Value *Args[] = { 2567 RTLoc, 2568 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars 2569 CGF.Builder.CreateBitCast(OutlinedFn, RT.getKmpc_MicroPointerTy())}; 2570 llvm::SmallVector<llvm::Value *, 16> RealArgs; 2571 RealArgs.append(std::begin(Args), std::end(Args)); 2572 RealArgs.append(CapturedVars.begin(), CapturedVars.end()); 2573 2574 auto RTLFn = RT.createRuntimeFunction(OMPRTL__kmpc_fork_call); 2575 CGF.EmitRuntimeCall(RTLFn, RealArgs); 2576 }; 2577 auto &&ElseGen = [OutlinedFn, CapturedVars, RTLoc, Loc](CodeGenFunction &CGF, 2578 PrePostActionTy &) { 2579 auto &RT = CGF.CGM.getOpenMPRuntime(); 2580 
auto ThreadID = RT.getThreadID(CGF, Loc); 2581 // Build calls: 2582 // __kmpc_serialized_parallel(&Loc, GTid); 2583 llvm::Value *Args[] = {RTLoc, ThreadID}; 2584 CGF.EmitRuntimeCall( 2585 RT.createRuntimeFunction(OMPRTL__kmpc_serialized_parallel), Args); 2586 2587 // OutlinedFn(>id, &zero, CapturedStruct); 2588 auto ThreadIDAddr = RT.emitThreadIDAddress(CGF, Loc); 2589 Address ZeroAddr = 2590 CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4), 2591 /*Name*/ ".zero.addr"); 2592 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0)); 2593 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs; 2594 OutlinedFnArgs.push_back(ThreadIDAddr.getPointer()); 2595 OutlinedFnArgs.push_back(ZeroAddr.getPointer()); 2596 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end()); 2597 RT.emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs); 2598 2599 // __kmpc_end_serialized_parallel(&Loc, GTid); 2600 llvm::Value *EndArgs[] = {RT.emitUpdateLocation(CGF, Loc), ThreadID}; 2601 CGF.EmitRuntimeCall( 2602 RT.createRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel), 2603 EndArgs); 2604 }; 2605 if (IfCond) 2606 emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen); 2607 else { 2608 RegionCodeGenTy ThenRCG(ThenGen); 2609 ThenRCG(CGF); 2610 } 2611 } 2612 2613 // If we're inside an (outlined) parallel region, use the region info's 2614 // thread-ID variable (it is passed in a first argument of the outlined function 2615 // as "kmp_int32 *gtid"). Otherwise, if we're not inside parallel region, but in 2616 // regular serial code region, get thread ID by calling kmp_int32 2617 // kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and 2618 // return the address of that temp. 
2619 Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF, 2620 SourceLocation Loc) { 2621 if (auto *OMPRegionInfo = 2622 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) 2623 if (OMPRegionInfo->getThreadIDVariable()) 2624 return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(); 2625 2626 auto ThreadID = getThreadID(CGF, Loc); 2627 auto Int32Ty = 2628 CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true); 2629 auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp."); 2630 CGF.EmitStoreOfScalar(ThreadID, 2631 CGF.MakeAddrLValue(ThreadIDTemp, Int32Ty)); 2632 2633 return ThreadIDTemp; 2634 } 2635 2636 llvm::Constant * 2637 CGOpenMPRuntime::getOrCreateInternalVariable(llvm::Type *Ty, 2638 const llvm::Twine &Name) { 2639 SmallString<256> Buffer; 2640 llvm::raw_svector_ostream Out(Buffer); 2641 Out << Name; 2642 auto RuntimeName = Out.str(); 2643 auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first; 2644 if (Elem.second) { 2645 assert(Elem.second->getType()->getPointerElementType() == Ty && 2646 "OMP internal variable has different type than requested"); 2647 return &*Elem.second; 2648 } 2649 2650 return Elem.second = new llvm::GlobalVariable( 2651 CGM.getModule(), Ty, /*IsConstant*/ false, 2652 llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty), 2653 Elem.first()); 2654 } 2655 2656 llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) { 2657 llvm::Twine Name(".gomp_critical_user_", CriticalName); 2658 return getOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var")); 2659 } 2660 2661 namespace { 2662 /// Common pre(post)-action for different OpenMP constructs. 
2663 class CommonActionTy final : public PrePostActionTy { 2664 llvm::Value *EnterCallee; 2665 ArrayRef<llvm::Value *> EnterArgs; 2666 llvm::Value *ExitCallee; 2667 ArrayRef<llvm::Value *> ExitArgs; 2668 bool Conditional; 2669 llvm::BasicBlock *ContBlock = nullptr; 2670 2671 public: 2672 CommonActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs, 2673 llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs, 2674 bool Conditional = false) 2675 : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee), 2676 ExitArgs(ExitArgs), Conditional(Conditional) {} 2677 void Enter(CodeGenFunction &CGF) override { 2678 llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs); 2679 if (Conditional) { 2680 llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes); 2681 auto *ThenBlock = CGF.createBasicBlock("omp_if.then"); 2682 ContBlock = CGF.createBasicBlock("omp_if.end"); 2683 // Generate the branch (If-stmt) 2684 CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock); 2685 CGF.EmitBlock(ThenBlock); 2686 } 2687 } 2688 void Done(CodeGenFunction &CGF) { 2689 // Emit the rest of blocks/branches 2690 CGF.EmitBranch(ContBlock); 2691 CGF.EmitBlock(ContBlock, true); 2692 } 2693 void Exit(CodeGenFunction &CGF) override { 2694 CGF.EmitRuntimeCall(ExitCallee, ExitArgs); 2695 } 2696 }; 2697 } // anonymous namespace 2698 2699 void CGOpenMPRuntime::emitCriticalRegion(CodeGenFunction &CGF, 2700 StringRef CriticalName, 2701 const RegionCodeGenTy &CriticalOpGen, 2702 SourceLocation Loc, const Expr *Hint) { 2703 // __kmpc_critical[_with_hint](ident_t *, gtid, Lock[, hint]); 2704 // CriticalOpGen(); 2705 // __kmpc_end_critical(ident_t *, gtid, Lock); 2706 // Prepare arguments and build a call to __kmpc_critical 2707 if (!CGF.HaveInsertPoint()) 2708 return; 2709 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), 2710 getCriticalRegionLock(CriticalName)}; 2711 llvm::SmallVector<llvm::Value *, 4> EnterArgs(std::begin(Args), 
2712 std::end(Args)); 2713 if (Hint) { 2714 EnterArgs.push_back(CGF.Builder.CreateIntCast( 2715 CGF.EmitScalarExpr(Hint), CGM.IntPtrTy, /*isSigned=*/false)); 2716 } 2717 CommonActionTy Action( 2718 createRuntimeFunction(Hint ? OMPRTL__kmpc_critical_with_hint 2719 : OMPRTL__kmpc_critical), 2720 EnterArgs, createRuntimeFunction(OMPRTL__kmpc_end_critical), Args); 2721 CriticalOpGen.setAction(Action); 2722 emitInlinedDirective(CGF, OMPD_critical, CriticalOpGen); 2723 } 2724 2725 void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF, 2726 const RegionCodeGenTy &MasterOpGen, 2727 SourceLocation Loc) { 2728 if (!CGF.HaveInsertPoint()) 2729 return; 2730 // if(__kmpc_master(ident_t *, gtid)) { 2731 // MasterOpGen(); 2732 // __kmpc_end_master(ident_t *, gtid); 2733 // } 2734 // Prepare arguments and build a call to __kmpc_master 2735 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; 2736 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_master), Args, 2737 createRuntimeFunction(OMPRTL__kmpc_end_master), Args, 2738 /*Conditional=*/true); 2739 MasterOpGen.setAction(Action); 2740 emitInlinedDirective(CGF, OMPD_master, MasterOpGen); 2741 Action.Done(CGF); 2742 } 2743 2744 void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF, 2745 SourceLocation Loc) { 2746 if (!CGF.HaveInsertPoint()) 2747 return; 2748 // Build call __kmpc_omp_taskyield(loc, thread_id, 0); 2749 llvm::Value *Args[] = { 2750 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), 2751 llvm::ConstantInt::get(CGM.IntTy, /*V=*/0, /*isSigned=*/true)}; 2752 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskyield), Args); 2753 if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) 2754 Region->emitUntiedSwitch(CGF); 2755 } 2756 2757 void CGOpenMPRuntime::emitTaskgroupRegion(CodeGenFunction &CGF, 2758 const RegionCodeGenTy &TaskgroupOpGen, 2759 SourceLocation Loc) { 2760 if (!CGF.HaveInsertPoint()) 2761 return; 2762 // 
__kmpc_taskgroup(ident_t *, gtid); 2763 // TaskgroupOpGen(); 2764 // __kmpc_end_taskgroup(ident_t *, gtid); 2765 // Prepare arguments and build a call to __kmpc_taskgroup 2766 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; 2767 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_taskgroup), Args, 2768 createRuntimeFunction(OMPRTL__kmpc_end_taskgroup), 2769 Args); 2770 TaskgroupOpGen.setAction(Action); 2771 emitInlinedDirective(CGF, OMPD_taskgroup, TaskgroupOpGen); 2772 } 2773 2774 /// Given an array of pointers to variables, project the address of a 2775 /// given variable. 2776 static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array, 2777 unsigned Index, const VarDecl *Var) { 2778 // Pull out the pointer to the variable. 2779 Address PtrAddr = 2780 CGF.Builder.CreateConstArrayGEP(Array, Index, CGF.getPointerSize()); 2781 llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr); 2782 2783 Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var)); 2784 Addr = CGF.Builder.CreateElementBitCast( 2785 Addr, CGF.ConvertTypeForMem(Var->getType())); 2786 return Addr; 2787 } 2788 2789 static llvm::Value *emitCopyprivateCopyFunction( 2790 CodeGenModule &CGM, llvm::Type *ArgsType, 2791 ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, 2792 ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps, 2793 SourceLocation Loc) { 2794 auto &C = CGM.getContext(); 2795 // void copy_func(void *LHSArg, void *RHSArg); 2796 FunctionArgList Args; 2797 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, 2798 ImplicitParamDecl::Other); 2799 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, 2800 ImplicitParamDecl::Other); 2801 Args.push_back(&LHSArg); 2802 Args.push_back(&RHSArg); 2803 auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 2804 auto *Fn = llvm::Function::Create( 2805 CGM.getTypes().GetFunctionType(CGFI), 
llvm::GlobalValue::InternalLinkage, 2806 ".omp.copyprivate.copy_func", &CGM.getModule()); 2807 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI); 2808 CodeGenFunction CGF(CGM); 2809 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, Loc); 2810 // Dest = (void*[n])(LHSArg); 2811 // Src = (void*[n])(RHSArg); 2812 Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 2813 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)), 2814 ArgsType), CGF.getPointerAlign()); 2815 Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 2816 CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)), 2817 ArgsType), CGF.getPointerAlign()); 2818 // *(Type0*)Dst[0] = *(Type0*)Src[0]; 2819 // *(Type1*)Dst[1] = *(Type1*)Src[1]; 2820 // ... 2821 // *(Typen*)Dst[n] = *(Typen*)Src[n]; 2822 for (unsigned I = 0, E = AssignmentOps.size(); I < E; ++I) { 2823 auto DestVar = cast<VarDecl>(cast<DeclRefExpr>(DestExprs[I])->getDecl()); 2824 Address DestAddr = emitAddrOfVarFromArray(CGF, LHS, I, DestVar); 2825 2826 auto SrcVar = cast<VarDecl>(cast<DeclRefExpr>(SrcExprs[I])->getDecl()); 2827 Address SrcAddr = emitAddrOfVarFromArray(CGF, RHS, I, SrcVar); 2828 2829 auto *VD = cast<DeclRefExpr>(CopyprivateVars[I])->getDecl(); 2830 QualType Type = VD->getType(); 2831 CGF.EmitOMPCopy(Type, DestAddr, SrcAddr, DestVar, SrcVar, AssignmentOps[I]); 2832 } 2833 CGF.FinishFunction(); 2834 return Fn; 2835 } 2836 2837 void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF, 2838 const RegionCodeGenTy &SingleOpGen, 2839 SourceLocation Loc, 2840 ArrayRef<const Expr *> CopyprivateVars, 2841 ArrayRef<const Expr *> SrcExprs, 2842 ArrayRef<const Expr *> DstExprs, 2843 ArrayRef<const Expr *> AssignmentOps) { 2844 if (!CGF.HaveInsertPoint()) 2845 return; 2846 assert(CopyprivateVars.size() == SrcExprs.size() && 2847 CopyprivateVars.size() == DstExprs.size() && 2848 CopyprivateVars.size() == AssignmentOps.size()); 2849 auto &C = CGM.getContext(); 2850 // int32 did_it = 0; 2851 // 
if(__kmpc_single(ident_t *, gtid)) { 2852 // SingleOpGen(); 2853 // __kmpc_end_single(ident_t *, gtid); 2854 // did_it = 1; 2855 // } 2856 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>, 2857 // <copy_func>, did_it); 2858 2859 Address DidIt = Address::invalid(); 2860 if (!CopyprivateVars.empty()) { 2861 // int32 did_it = 0; 2862 auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1); 2863 DidIt = CGF.CreateMemTemp(KmpInt32Ty, ".omp.copyprivate.did_it"); 2864 CGF.Builder.CreateStore(CGF.Builder.getInt32(0), DidIt); 2865 } 2866 // Prepare arguments and build a call to __kmpc_single 2867 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; 2868 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_single), Args, 2869 createRuntimeFunction(OMPRTL__kmpc_end_single), Args, 2870 /*Conditional=*/true); 2871 SingleOpGen.setAction(Action); 2872 emitInlinedDirective(CGF, OMPD_single, SingleOpGen); 2873 if (DidIt.isValid()) { 2874 // did_it = 1; 2875 CGF.Builder.CreateStore(CGF.Builder.getInt32(1), DidIt); 2876 } 2877 Action.Done(CGF); 2878 // call __kmpc_copyprivate(ident_t *, gtid, <buf_size>, <copyprivate list>, 2879 // <copy_func>, did_it); 2880 if (DidIt.isValid()) { 2881 llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size()); 2882 auto CopyprivateArrayTy = 2883 C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal, 2884 /*IndexTypeQuals=*/0); 2885 // Create a list of all private variables for copyprivate. 
2886 Address CopyprivateList = 2887 CGF.CreateMemTemp(CopyprivateArrayTy, ".omp.copyprivate.cpr_list"); 2888 for (unsigned I = 0, E = CopyprivateVars.size(); I < E; ++I) { 2889 Address Elem = CGF.Builder.CreateConstArrayGEP( 2890 CopyprivateList, I, CGF.getPointerSize()); 2891 CGF.Builder.CreateStore( 2892 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( 2893 CGF.EmitLValue(CopyprivateVars[I]).getPointer(), CGF.VoidPtrTy), 2894 Elem); 2895 } 2896 // Build function that copies private values from single region to all other 2897 // threads in the corresponding parallel region. 2898 auto *CpyFn = emitCopyprivateCopyFunction( 2899 CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(), 2900 CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc); 2901 auto *BufSize = CGF.getTypeSize(CopyprivateArrayTy); 2902 Address CL = 2903 CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList, 2904 CGF.VoidPtrTy); 2905 auto *DidItVal = CGF.Builder.CreateLoad(DidIt); 2906 llvm::Value *Args[] = { 2907 emitUpdateLocation(CGF, Loc), // ident_t *<loc> 2908 getThreadID(CGF, Loc), // i32 <gtid> 2909 BufSize, // size_t <buf_size> 2910 CL.getPointer(), // void *<copyprivate list> 2911 CpyFn, // void (*) (void *, void *) <copy_func> 2912 DidItVal // i32 did_it 2913 }; 2914 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_copyprivate), Args); 2915 } 2916 } 2917 2918 void CGOpenMPRuntime::emitOrderedRegion(CodeGenFunction &CGF, 2919 const RegionCodeGenTy &OrderedOpGen, 2920 SourceLocation Loc, bool IsThreads) { 2921 if (!CGF.HaveInsertPoint()) 2922 return; 2923 // __kmpc_ordered(ident_t *, gtid); 2924 // OrderedOpGen(); 2925 // __kmpc_end_ordered(ident_t *, gtid); 2926 // Prepare arguments and build a call to __kmpc_ordered 2927 if (IsThreads) { 2928 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)}; 2929 CommonActionTy Action(createRuntimeFunction(OMPRTL__kmpc_ordered), Args, 2930 createRuntimeFunction(OMPRTL__kmpc_end_ordered), 2931 Args); 
2932 OrderedOpGen.setAction(Action); 2933 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen); 2934 return; 2935 } 2936 emitInlinedDirective(CGF, OMPD_ordered, OrderedOpGen); 2937 } 2938 2939 void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, 2940 OpenMPDirectiveKind Kind, bool EmitChecks, 2941 bool ForceSimpleCall) { 2942 if (!CGF.HaveInsertPoint()) 2943 return; 2944 // Build call __kmpc_cancel_barrier(loc, thread_id); 2945 // Build call __kmpc_barrier(loc, thread_id); 2946 unsigned Flags; 2947 if (Kind == OMPD_for) 2948 Flags = OMP_IDENT_BARRIER_IMPL_FOR; 2949 else if (Kind == OMPD_sections) 2950 Flags = OMP_IDENT_BARRIER_IMPL_SECTIONS; 2951 else if (Kind == OMPD_single) 2952 Flags = OMP_IDENT_BARRIER_IMPL_SINGLE; 2953 else if (Kind == OMPD_barrier) 2954 Flags = OMP_IDENT_BARRIER_EXPL; 2955 else 2956 Flags = OMP_IDENT_BARRIER_IMPL; 2957 // Build call __kmpc_cancel_barrier(loc, thread_id) or __kmpc_barrier(loc, 2958 // thread_id); 2959 llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc, Flags), 2960 getThreadID(CGF, Loc)}; 2961 if (auto *OMPRegionInfo = 2962 dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) { 2963 if (!ForceSimpleCall && OMPRegionInfo->hasCancel()) { 2964 auto *Result = CGF.EmitRuntimeCall( 2965 createRuntimeFunction(OMPRTL__kmpc_cancel_barrier), Args); 2966 if (EmitChecks) { 2967 // if (__kmpc_cancel_barrier()) { 2968 // exit from construct; 2969 // } 2970 auto *ExitBB = CGF.createBasicBlock(".cancel.exit"); 2971 auto *ContBB = CGF.createBasicBlock(".cancel.continue"); 2972 auto *Cmp = CGF.Builder.CreateIsNotNull(Result); 2973 CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB); 2974 CGF.EmitBlock(ExitBB); 2975 // exit from construct; 2976 auto CancelDestination = 2977 CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind()); 2978 CGF.EmitBranchThroughCleanup(CancelDestination); 2979 CGF.EmitBlock(ContBB, /*IsFinished=*/true); 2980 } 2981 return; 2982 } 2983 } 2984 
CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_barrier), Args); 2985 } 2986 2987 /// \brief Map the OpenMP loop schedule to the runtime enumeration. 2988 static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind, 2989 bool Chunked, bool Ordered) { 2990 switch (ScheduleKind) { 2991 case OMPC_SCHEDULE_static: 2992 return Chunked ? (Ordered ? OMP_ord_static_chunked : OMP_sch_static_chunked) 2993 : (Ordered ? OMP_ord_static : OMP_sch_static); 2994 case OMPC_SCHEDULE_dynamic: 2995 return Ordered ? OMP_ord_dynamic_chunked : OMP_sch_dynamic_chunked; 2996 case OMPC_SCHEDULE_guided: 2997 return Ordered ? OMP_ord_guided_chunked : OMP_sch_guided_chunked; 2998 case OMPC_SCHEDULE_runtime: 2999 return Ordered ? OMP_ord_runtime : OMP_sch_runtime; 3000 case OMPC_SCHEDULE_auto: 3001 return Ordered ? OMP_ord_auto : OMP_sch_auto; 3002 case OMPC_SCHEDULE_unknown: 3003 assert(!Chunked && "chunk was specified but schedule kind not known"); 3004 return Ordered ? OMP_ord_static : OMP_sch_static; 3005 } 3006 llvm_unreachable("Unexpected runtime schedule"); 3007 } 3008 3009 /// \brief Map the OpenMP distribute schedule to the runtime enumeration. 3010 static OpenMPSchedType 3011 getRuntimeSchedule(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) { 3012 // only static is allowed for dist_schedule 3013 return Chunked ? 
OMP_dist_sch_static_chunked : OMP_dist_sch_static; 3014 } 3015 3016 bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, 3017 bool Chunked) const { 3018 auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked, /*Ordered=*/false); 3019 return Schedule == OMP_sch_static; 3020 } 3021 3022 bool CGOpenMPRuntime::isStaticNonchunked( 3023 OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const { 3024 auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked); 3025 return Schedule == OMP_dist_sch_static; 3026 } 3027 3028 3029 bool CGOpenMPRuntime::isDynamic(OpenMPScheduleClauseKind ScheduleKind) const { 3030 auto Schedule = 3031 getRuntimeSchedule(ScheduleKind, /*Chunked=*/false, /*Ordered=*/false); 3032 assert(Schedule != OMP_sch_static_chunked && "cannot be chunked here"); 3033 return Schedule != OMP_sch_static; 3034 } 3035 3036 static int addMonoNonMonoModifier(OpenMPSchedType Schedule, 3037 OpenMPScheduleClauseModifier M1, 3038 OpenMPScheduleClauseModifier M2) { 3039 int Modifier = 0; 3040 switch (M1) { 3041 case OMPC_SCHEDULE_MODIFIER_monotonic: 3042 Modifier = OMP_sch_modifier_monotonic; 3043 break; 3044 case OMPC_SCHEDULE_MODIFIER_nonmonotonic: 3045 Modifier = OMP_sch_modifier_nonmonotonic; 3046 break; 3047 case OMPC_SCHEDULE_MODIFIER_simd: 3048 if (Schedule == OMP_sch_static_chunked) 3049 Schedule = OMP_sch_static_balanced_chunked; 3050 break; 3051 case OMPC_SCHEDULE_MODIFIER_last: 3052 case OMPC_SCHEDULE_MODIFIER_unknown: 3053 break; 3054 } 3055 switch (M2) { 3056 case OMPC_SCHEDULE_MODIFIER_monotonic: 3057 Modifier = OMP_sch_modifier_monotonic; 3058 break; 3059 case OMPC_SCHEDULE_MODIFIER_nonmonotonic: 3060 Modifier = OMP_sch_modifier_nonmonotonic; 3061 break; 3062 case OMPC_SCHEDULE_MODIFIER_simd: 3063 if (Schedule == OMP_sch_static_chunked) 3064 Schedule = OMP_sch_static_balanced_chunked; 3065 break; 3066 case OMPC_SCHEDULE_MODIFIER_last: 3067 case OMPC_SCHEDULE_MODIFIER_unknown: 3068 break; 3069 } 3070 return Schedule | 
Modifier; 3071 } 3072 3073 void CGOpenMPRuntime::emitForDispatchInit( 3074 CodeGenFunction &CGF, SourceLocation Loc, 3075 const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, 3076 bool Ordered, const DispatchRTInput &DispatchValues) { 3077 if (!CGF.HaveInsertPoint()) 3078 return; 3079 OpenMPSchedType Schedule = getRuntimeSchedule( 3080 ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered); 3081 assert(Ordered || 3082 (Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked && 3083 Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked && 3084 Schedule != OMP_sch_static_balanced_chunked)); 3085 // Call __kmpc_dispatch_init( 3086 // ident_t *loc, kmp_int32 tid, kmp_int32 schedule, 3087 // kmp_int[32|64] lower, kmp_int[32|64] upper, 3088 // kmp_int[32|64] stride, kmp_int[32|64] chunk); 3089 3090 // If the Chunk was not specified in the clause - use default value 1. 3091 llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk 3092 : CGF.Builder.getIntN(IVSize, 1); 3093 llvm::Value *Args[] = { 3094 emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc), 3095 CGF.Builder.getInt32(addMonoNonMonoModifier( 3096 Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type 3097 DispatchValues.LB, // Lower 3098 DispatchValues.UB, // Upper 3099 CGF.Builder.getIntN(IVSize, 1), // Stride 3100 Chunk // Chunk 3101 }; 3102 CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args); 3103 } 3104 3105 static void emitForStaticInitCall( 3106 CodeGenFunction &CGF, llvm::Value *UpdateLocation, llvm::Value *ThreadId, 3107 llvm::Constant *ForStaticInitFunction, OpenMPSchedType Schedule, 3108 OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, 3109 const CGOpenMPRuntime::StaticRTInput &Values) { 3110 if (!CGF.HaveInsertPoint()) 3111 return; 3112 3113 assert(!Values.Ordered); 3114 assert(Schedule == OMP_sch_static || Schedule == OMP_sch_static_chunked || 3115 Schedule == OMP_sch_static_balanced_chunked || 3116 
Schedule == OMP_ord_static || Schedule == OMP_ord_static_chunked || 3117 Schedule == OMP_dist_sch_static || 3118 Schedule == OMP_dist_sch_static_chunked); 3119 3120 // Call __kmpc_for_static_init( 3121 // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype, 3122 // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower, 3123 // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride, 3124 // kmp_int[32|64] incr, kmp_int[32|64] chunk); 3125 llvm::Value *Chunk = Values.Chunk; 3126 if (Chunk == nullptr) { 3127 assert((Schedule == OMP_sch_static || Schedule == OMP_ord_static || 3128 Schedule == OMP_dist_sch_static) && 3129 "expected static non-chunked schedule"); 3130 // If the Chunk was not specified in the clause - use default value 1. 3131 Chunk = CGF.Builder.getIntN(Values.IVSize, 1); 3132 } else { 3133 assert((Schedule == OMP_sch_static_chunked || 3134 Schedule == OMP_sch_static_balanced_chunked || 3135 Schedule == OMP_ord_static_chunked || 3136 Schedule == OMP_dist_sch_static_chunked) && 3137 "expected static chunked schedule"); 3138 } 3139 llvm::Value *Args[] = { 3140 UpdateLocation, 3141 ThreadId, 3142 CGF.Builder.getInt32(addMonoNonMonoModifier(Schedule, M1, 3143 M2)), // Schedule type 3144 Values.IL.getPointer(), // &isLastIter 3145 Values.LB.getPointer(), // &LB 3146 Values.UB.getPointer(), // &UB 3147 Values.ST.getPointer(), // &Stride 3148 CGF.Builder.getIntN(Values.IVSize, 1), // Incr 3149 Chunk // Chunk 3150 }; 3151 CGF.EmitRuntimeCall(ForStaticInitFunction, Args); 3152 } 3153 3154 void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF, 3155 SourceLocation Loc, 3156 OpenMPDirectiveKind DKind, 3157 const OpenMPScheduleTy &ScheduleKind, 3158 const StaticRTInput &Values) { 3159 OpenMPSchedType ScheduleNum = getRuntimeSchedule( 3160 ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered); 3161 assert(isOpenMPWorksharingDirective(DKind) && 3162 "Expected loop-based or sections-based directive."); 3163 auto *UpdatedLocation = emitUpdateLocation(CGF, Loc, 3164 
                                            isOpenMPLoopDirective(DKind)
                                                ? OMP_IDENT_WORK_LOOP
                                                : OMP_IDENT_WORK_SECTIONS);
  auto *ThreadId = getThreadID(CGF, Loc);
  auto *StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
}

/// Emit static initialization for a 'distribute' construct. Same runtime
/// entry point as emitForStaticInit, but with the DISTRIBUTE ident flag and
/// no schedule modifiers (the dist_schedule clause has none).
void CGOpenMPRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind,
    const CGOpenMPRuntime::StaticRTInput &Values) {
  OpenMPSchedType ScheduleNum =
      getRuntimeSchedule(SchedKind, Values.Chunk != nullptr);
  auto *UpdatedLocation =
      emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
  auto *ThreadId = getThreadID(CGF, Loc);
  auto *StaticInitFunction =
      createForStaticInitFunction(Values.IVSize, Values.IVSigned);
  emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
                        ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
                        OMPC_SCHEDULE_MODIFIER_unknown, Values);
}

/// Emit the matching finalization call for a statically scheduled region;
/// the ident_t flags must mirror those used by the *_init call.
void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind DKind) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc,
                         isOpenMPDistributeDirective(DKind)
                             ? OMP_IDENT_WORK_DISTRIBUTE
                             : isOpenMPLoopDirective(DKind)
                                   ?
                                   OMP_IDENT_WORK_LOOP
                                   : OMP_IDENT_WORK_SECTIONS),
      getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_for_static_fini),
                      Args);
}

/// Notify the runtime that the current iteration of an 'ordered' dynamic
/// loop is finished.
void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                 SourceLocation Loc,
                                                 unsigned IVSize,
                                                 bool IVSigned) {
  if (!CGF.HaveInsertPoint())
    return;
  // Call __kmpc_dispatch_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
}

/// Request the next chunk of a dynamically scheduled loop. Returns the
/// runtime's kmp_int32 result converted to a bool: true while there is
/// more work, with the bounds written through IL/LB/UB/ST.
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
                                          SourceLocation Loc, unsigned IVSize,
                                          bool IVSigned, Address IL,
                                          Address LB, Address UB,
                                          Address ST) {
  // Call __kmpc_dispatch_next(
  //          ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
  //          kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
  //          kmp_int[32|64] *p_stride);
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc),
      getThreadID(CGF, Loc),
      IL.getPointer(), // &isLastIter
      LB.getPointer(), // &Lower
      UB.getPointer(), // &Upper
      ST.getPointer()  // &Stride
  };
  llvm::Value *Call =
      CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
  return CGF.EmitScalarConversion(
      Call, CGF.getContext().getIntTypeForBitwidth(32, /* Signed */ true),
      CGF.getContext().BoolTy, Loc);
}

/// Emit code for the 'num_threads' clause: the requested count is pushed to
/// the runtime before the parallel region is entered.
void CGOpenMPRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                           llvm::Value *NumThreads,
                                           SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_threads),
                      Args);
}

/// Emit code for the 'proc_bind' clause: translate the clause kind to the
/// runtime's numeric encoding and push it via __kmpc_push_proc_bind.
void CGOpenMPRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                         OpenMPProcBindClauseKind ProcBind,
                                         SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Constants for proc bind value accepted by the runtime.
  enum ProcBindTy {
    ProcBindFalse = 0,
    ProcBindTrue,
    ProcBindMaster,
    ProcBindClose,
    ProcBindSpread,
    ProcBindIntel,
    ProcBindDefault
  } RuntimeProcBind;
  switch (ProcBind) {
  case OMPC_PROC_BIND_master:
    RuntimeProcBind = ProcBindMaster;
    break;
  case OMPC_PROC_BIND_close:
    RuntimeProcBind = ProcBindClose;
    break;
  case OMPC_PROC_BIND_spread:
    RuntimeProcBind = ProcBindSpread;
    break;
  case OMPC_PROC_BIND_unknown:
    llvm_unreachable("Unsupported proc_bind value.");
  }
  // Build call __kmpc_push_proc_bind(&loc, global_tid, proc_bind)
  llvm::Value *Args[] = {
      emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
      llvm::ConstantInt::get(CGM.IntTy, RuntimeProcBind, /*isSigned=*/true)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_proc_bind), Args);
}

/// Emit a 'flush' construct. The list of flushed variables is ignored —
/// __kmpc_flush is a full memory flush.
void CGOpenMPRuntime::emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
                                SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call void __kmpc_flush(ident_t *loc)
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_flush),
                      emitUpdateLocation(CGF, Loc));
}

namespace {
/// \brief Indexes of fields for type kmp_task_t.
enum KmpTaskTFields {
  /// \brief List of shared variables.
  KmpTaskTShareds,
  /// \brief Task routine.
  KmpTaskTRoutine,
  /// \brief Partition id for the untied tasks.
  KmpTaskTPartId,
  /// Function with call of destructors for private variables.
  Data1,
  /// Task priority.
  Data2,
  /// (Taskloops only) Lower bound.
  KmpTaskTLowerBound,
  /// (Taskloops only) Upper bound.
  KmpTaskTUpperBound,
  /// (Taskloops only) Stride.
  KmpTaskTStride,
  /// (Taskloops only) Is last iteration flag.
  KmpTaskTLastIter,
  /// (Taskloops only) Reduction data.
  KmpTaskTReductions,
};
} // anonymous namespace

/// Return true if no offload entries have been registered yet.
bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
  // FIXME: Add other entries type when they become supported.
  return OffloadEntriesTargetRegion.empty();
}

/// \brief Initialize target region entry.
/// Creates a placeholder entry (null address/ID) at the given position so
/// that device codegen can later fill it in; device-only by the assert.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                    StringRef ParentName, unsigned LineNum,
                                    unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
      OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
                                   /*Flags=*/0);
  ++OffloadingEntriesNum;
}

/// Register the address/ID/flags of a target region entry, keyed by
/// (device, file, parent function, line).
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  llvm::Constant *Addr, llvm::Constant *ID,
                                  int32_t Flags) {
  // If we are emitting code for a target, the entry is already initialized,
  // only has to be registered.
3352 if (CGM.getLangOpts().OpenMPIsDevice) { 3353 assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) && 3354 "Entry must exist."); 3355 auto &Entry = 3356 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum]; 3357 assert(Entry.isValid() && "Entry not initialized!"); 3358 Entry.setAddress(Addr); 3359 Entry.setID(ID); 3360 Entry.setFlags(Flags); 3361 return; 3362 } else { 3363 OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID, Flags); 3364 OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry; 3365 } 3366 } 3367 3368 bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo( 3369 unsigned DeviceID, unsigned FileID, StringRef ParentName, 3370 unsigned LineNum) const { 3371 auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID); 3372 if (PerDevice == OffloadEntriesTargetRegion.end()) 3373 return false; 3374 auto PerFile = PerDevice->second.find(FileID); 3375 if (PerFile == PerDevice->second.end()) 3376 return false; 3377 auto PerParentName = PerFile->second.find(ParentName); 3378 if (PerParentName == PerFile->second.end()) 3379 return false; 3380 auto PerLine = PerParentName->second.find(LineNum); 3381 if (PerLine == PerParentName->second.end()) 3382 return false; 3383 // Fail if this entry is already registered. 3384 if (PerLine->second.getAddress() || PerLine->second.getID()) 3385 return false; 3386 return true; 3387 } 3388 3389 void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo( 3390 const OffloadTargetRegionEntryInfoActTy &Action) { 3391 // Scan all target region entries and perform the provided action. 3392 for (auto &D : OffloadEntriesTargetRegion) 3393 for (auto &F : D.second) 3394 for (auto &P : F.second) 3395 for (auto &L : P.second) 3396 Action(D.first, F.first, P.first(), L.first, L.second); 3397 } 3398 3399 /// \brief Create a Ctor/Dtor-like function whose body is emitted through 3400 /// \a Codegen. 
/// This is used to emit the two functions that register and
/// unregister the descriptor of the current compilation unit.
static llvm::Function *
createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
                                         const RegionCodeGenTy &Codegen) {
  auto &C = CGM.getContext();
  FunctionArgList Args;
  // Single dummy void* parameter; the registration functions take an opaque
  // argument they do not use.
  ImplicitParamDecl DummyPtr(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  Args.push_back(&DummyPtr);

  CodeGenFunction CGF(CGM);
  // Disable debug info for global (de-)initializer because they are not part of
  // some particular construct.
  CGF.disableDebugInfo();
  auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto FTy = CGM.getTypes().GetFunctionType(FI);
  auto *Fn = CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args);
  Codegen(CGF);
  CGF.FinishFunction();
  return Fn;
}

/// Build the .omp_offloading.descriptor global plus the pair of functions
/// that register/unregister it with the offload runtime (__tgt_register_lib
/// / __tgt_unregister_lib). Returns the registration function, or nullptr
/// when nothing needs to be emitted.
llvm::Function *
CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
    return nullptr;

  auto &M = CGM.getModule();
  auto &C = CGM.getContext();

  // Get list of devices we care about
  auto &Devices = CGM.getLangOpts().OMPTargetTriples;

  // We should be creating an offloading descriptor only if there are devices
  // specified.
  assert(!Devices.empty() && "No OpenMP offloading devices??");

  // Create the external variables that will point to the begin and end of the
  // host entries section. These will be defined by the linker.
  auto *OffloadEntryTy =
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
  llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
      ".omp_offloading.entries_begin");
  llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr,
      ".omp_offloading.entries_end");

  // Create all device images
  auto *DeviceImageTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
  ConstantInitBuilder DeviceImagesBuilder(CGM);
  auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);

  // One __tgt_device_image per device triple; the image start/end symbols
  // are external and resolved by the linker/wrapper tooling.
  for (unsigned i = 0; i < Devices.size(); ++i) {
    StringRef T = Devices[i].getTriple();
    auto *ImgBegin = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
        /*Initializer=*/nullptr,
        Twine(".omp_offloading.img_start.") + Twine(T));
    auto *ImgEnd = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
        /*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));

    auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
    Dev.add(ImgBegin);
    Dev.add(ImgEnd);
    Dev.add(HostEntriesBegin);
    Dev.add(HostEntriesEnd);
    Dev.finishAndAddTo(DeviceImagesEntries);
  }

  // Create device images global array.
  llvm::GlobalVariable *DeviceImages =
      DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
                                                CGM.getPointerAlign(),
                                                /*isConstant=*/true);
  DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // This is a Zero array to be used in the creation of the constant expressions
  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
                             llvm::Constant::getNullValue(CGM.Int32Ty)};

  // Create the target region descriptor.
  auto *BinaryDescriptorTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
  ConstantInitBuilder DescBuilder(CGM);
  auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
  DescInit.addInt(CGM.Int32Ty, Devices.size());
  DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
                                                    DeviceImages,
                                                    Index));
  DescInit.add(HostEntriesBegin);
  DescInit.add(HostEntriesEnd);

  auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
                                              CGM.getPointerAlign(),
                                              /*isConstant=*/true);

  // Emit code to register or unregister the descriptor at execution
  // startup or closing, respectively.

  // Create a variable to drive the registration and unregistration of the
  // descriptor, so we can reuse the logic that emits Ctors and Dtors.
  auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
  ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
                                IdentInfo, C.CharTy, ImplicitParamDecl::Other);

  // The unregister function calls __tgt_unregister_lib; the register
  // function calls __tgt_register_lib and schedules the unregister function
  // to run at program exit via the C++ ABI's global-dtor mechanism.
  auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
      CGM, ".omp_offloading.descriptor_unreg",
      [&](CodeGenFunction &CGF, PrePostActionTy &) {
        CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
                            Desc);
      });
  auto *RegFn = createOffloadingBinaryDescriptorFunction(
      CGM, ".omp_offloading.descriptor_reg",
      [&](CodeGenFunction &CGF, PrePostActionTy &) {
        CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_register_lib),
                            Desc);
        CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
      });
  if (CGM.supportsCOMDAT()) {
    // It is sufficient to call registration function only once, so create a
    // COMDAT group for registration/unregistration functions and associated
    // data. That would reduce startup time and code size. Registration
    // function serves as a COMDAT group key.
    auto ComdatKey = M.getOrInsertComdat(RegFn->getName());
    RegFn->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    RegFn->setVisibility(llvm::GlobalValue::HiddenVisibility);
    RegFn->setComdat(ComdatKey);
    UnRegFn->setComdat(ComdatKey);
    DeviceImages->setComdat(ComdatKey);
    Desc->setComdat(ComdatKey);
  }
  return RegFn;
}

/// Emit one __tgt_offload_entry record into the .omp_offloading.entries
/// section for the given target region ID/address.
void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
                                         llvm::Constant *Addr, uint64_t Size,
                                         int32_t Flags) {
  StringRef Name = Addr->getName();
  auto *TgtOffloadEntryType = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
  llvm::LLVMContext &C = CGM.getModule().getContext();
  llvm::Module &M = CGM.getModule();

  // Make sure the address has the right type.
  llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(ID, CGM.VoidPtrTy);

  // Create constant string with the name.
  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);

  llvm::GlobalVariable *Str =
      new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
                               llvm::GlobalValue::InternalLinkage, StrPtrInit,
                               ".omp_offloading.entry_name");
  Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);

  // We can't have any padding between symbols, so we need to have 1-byte
  // alignment.
  auto Align = CharUnits::fromQuantity(1);

  // Create the entry struct.
  ConstantInitBuilder EntryBuilder(CGM);
  auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
  EntryInit.add(AddrPtr);
  EntryInit.add(StrPtr);
  EntryInit.addInt(CGM.SizeTy, Size);
  EntryInit.addInt(CGM.Int32Ty, Flags);
  // Last field is reserved for the runtime; always zero here.
  EntryInit.addInt(CGM.Int32Ty, 0);
  llvm::GlobalVariable *Entry =
      EntryInit.finishAndCreateGlobal(".omp_offloading.entry",
                                      Align,
                                      /*constant*/ true,
                                      llvm::GlobalValue::ExternalLinkage);

  // The entry has to be created in the section the linker expects it to be.
  Entry->setSection(".omp_offloading.entries");
}

void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
  // Emit the offloading entries and metadata so that the device codegen side
  // can easily figure out what to emit. The produced metadata looks like
  // this:
  //
  // !omp_offload.info = !{!1, ...}
  //
  // Right now we only generate metadata for functions that contain target
  // regions.

  // If we do not have entries, we don't need to do anything.
  if (OffloadEntriesInfoManager.empty())
    return;

  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();
  SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
      OrderedEntries(OffloadEntriesInfoManager.size());

  // Create the offloading info metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");

  // Auxiliary methods to create metadata values and strings.
  auto getMDInt = [&](unsigned v) {
    return llvm::ConstantAsMetadata::get(
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
  };

  auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };

  // Create function that emits metadata for each target region entry;
  auto &&TargetRegionMetadataEmitter = [&](
      unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
      OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
    llvm::SmallVector<llvm::Metadata *, 32> Ops;
    // Generate metadata for target regions. Each entry of this metadata
    // contains:
    // - Entry 0 -> Kind of this type of metadata (0).
    // - Entry 1 -> Device ID of the file where the entry was identified.
    // - Entry 2 -> File ID of the file where the entry was identified.
    // - Entry 3 -> Mangled name of the function where the entry was identified.
    // - Entry 4 -> Line in the file where the entry was identified.
    // - Entry 5 -> Order the entry was created.
    // The first element of the metadata node is the kind.
    Ops.push_back(getMDInt(E.getKind()));
    Ops.push_back(getMDInt(DeviceID));
    Ops.push_back(getMDInt(FileID));
    Ops.push_back(getMDString(ParentName));
    Ops.push_back(getMDInt(Line));
    Ops.push_back(getMDInt(E.getOrder()));

    // Save this entry in the right position of the ordered entries array.
    OrderedEntries[E.getOrder()] = &E;

    // Add metadata to the named metadata node.
    MD->addOperand(llvm::MDNode::get(C, Ops));
  };

  OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
      TargetRegionMetadataEmitter);

  // Now emit the actual host-side entry structs, in creation order.
  for (auto *E : OrderedEntries) {
    assert(E && "All ordered entries must exist!");
    if (auto *CE =
            dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
                E)) {
      assert(CE->getID() && CE->getAddress() &&
             "Entry ID and Addr are invalid!");
      createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0);
    } else
      llvm_unreachable("Unsupported entry kind.");
  }
}

/// \brief Loads all the offload entries information from the host IR
/// metadata.
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  // If we are in target mode, load the metadata from the host IR. This code has
  // to match the metadata creation in createOffloadEntriesAndInfoMetadata().

  // Only meaningful for device compilations that were given a host IR file.
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return;

  if (CGM.getLangOpts().OMPHostIRFile.empty())
    return;

  // Best-effort: silently bail out if the host IR file cannot be read or
  // parsed (diagnostics for parse failures are emitted by the helper).
  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
  if (Buf.getError())
    return;

  llvm::LLVMContext C;
  auto ME = expectedToErrorOrAndEmitErrors(
      C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));

  if (ME.getError())
    return;

  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
  if (!MD)
    return;

  for (auto I : MD->operands()) {
    llvm::MDNode *MN = cast<llvm::MDNode>(I);

    // Decoders for the operand layout produced by
    // createOffloadEntriesAndInfoMetadata().
    auto getMDInt = [&](unsigned Idx) {
      llvm::ConstantAsMetadata *V =
          cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
      return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
    };

    auto getMDString = [&](unsigned Idx) {
      llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
      return V->getString();
    };

    switch (getMDInt(0)) {
    default:
      llvm_unreachable("Unexpected metadata!");
      break;
    case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
        OFFLOAD_ENTRY_INFO_TARGET_REGION:
      OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
          /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
          /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
          /*Order=*/getMDInt(5));
      break;
    }
  }
}

/// Lazily build the LLVM/AST types for kmp_routine_entry_t, the task entry
/// point signature used by the tasking runtime.
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
  if (!KmpRoutineEntryPtrTy) {
    // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
    auto &C = CGM.getContext();
    QualType KmpRoutineEntryTyArgs[] = {KmpInt32Ty, C.VoidPtrTy};
    FunctionProtoType::ExtProtoInfo EPI;
    KmpRoutineEntryPtrQTy = C.getPointerType(
        C.getFunctionType(KmpInt32Ty, KmpRoutineEntryTyArgs, EPI));
    KmpRoutineEntryPtrTy = CGM.getTypes().ConvertType(KmpRoutineEntryPtrQTy);
  }
}

/// Append an unnamed public field of type \p FieldTy to record \p DC and
/// return the new FieldDecl. Used to build the implicit runtime structs.
static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
                                       QualType FieldTy) {
  auto *Field = FieldDecl::Create(
      C, DC, SourceLocation(), SourceLocation(), /*Id=*/nullptr, FieldTy,
      C.getTrivialTypeSourceInfo(FieldTy, SourceLocation()),
      /*BW=*/nullptr, /*Mutable=*/false, /*InitStyle=*/ICIS_NoInit);
  Field->setAccess(AS_public);
  DC->addDecl(Field);
  return Field;
}

/// Lazily build (and cache) the AST record type for __tgt_offload_entry.
QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {

  // Make sure the type of the entry is already created. This is the type we
  // have to create:
  // struct __tgt_offload_entry{
  //   void      *addr;       // Pointer to the offload entry info.
  //                          // (function or global)
  //   char      *name;       // Name of the function or global.
  //   size_t     size;       // Size of the entry info (0 if it a function).
  //   int32_t    flags;      // Flags associated with the entry, e.g. 'link'.
  //   int32_t    reserved;   // Reserved, to use by the runtime library.
  // };
  if (TgtOffloadEntryQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
    addFieldToRecordDecl(C, RD, C.getSizeType());
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    RD->completeDefinition();
    TgtOffloadEntryQTy = C.getRecordType(RD);
  }
  return TgtOffloadEntryQTy;
}

/// Lazily build (and cache) the AST record type for __tgt_device_image.
QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
  // These are the types we need to build:
  // struct __tgt_device_image{
  //   void   *ImageStart;       // Pointer to the target code start.
  //   void   *ImageEnd;         // Pointer to the target code end.
  //   // We also add the host entries to the device image, as it may be useful
  //   // for the target runtime to have access to that information.
  //   __tgt_offload_entry  *EntriesBegin;   // Begin of the table with all
  //                                         // the entries.
  //   __tgt_offload_entry  *EntriesEnd;     // End of the table with all the
  //                                         // entries (non inclusive).
  // };
  if (TgtDeviceImageQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    auto *RD = C.buildImplicitRecord("__tgt_device_image");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    RD->completeDefinition();
    TgtDeviceImageQTy = C.getRecordType(RD);
  }
  return TgtDeviceImageQTy;
}

/// Lazily build (and cache) the AST record type for __tgt_bin_desc.
QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
  // struct __tgt_bin_desc{
  //   int32_t              NumDevices;      // Number of devices supported.
  //   __tgt_device_image   *DeviceImages;  // Arrays of device images
  //                                        // (one per device).
  //   __tgt_offload_entry  *EntriesBegin;  // Begin of the table with all the
  //                                        // entries.
  //   __tgt_offload_entry  *EntriesEnd;    // End of the table with all the
  //                                        // entries (non inclusive).
  // };
  if (TgtBinaryDescriptorQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
    RD->startDefinition();
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    RD->completeDefinition();
    TgtBinaryDescriptorQTy = C.getRecordType(RD);
  }
  return TgtBinaryDescriptorQTy;
}

namespace {
/// Triple describing one privatized variable in a task: the original
/// declaration, its private copy, and (for firstprivate) the initializer.
struct PrivateHelpersTy {
  PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
                   const VarDecl *PrivateElemInit)
      : Original(Original), PrivateCopy(PrivateCopy),
        PrivateElemInit(PrivateElemInit) {}
  const VarDecl *Original;
  const VarDecl *PrivateCopy;
  const VarDecl *PrivateElemInit;
};
typedef std::pair<CharUnits /*Align*/, PrivateHelpersTy> PrivateDataTy;
} // anonymous namespace

/// Build the implicit record holding a task's private variables; returns
/// nullptr when there are none.
static RecordDecl *
createPrivatesRecordDecl(CodeGenModule &CGM, ArrayRef<PrivateDataTy> Privates) {
  if (!Privates.empty()) {
    auto &C = CGM.getContext();
    // Build struct .kmp_privates_t.
    // {
    //   /* private vars */
    // };
    auto *RD = C.buildImplicitRecord(".kmp_privates.t");
    RD->startDefinition();
    for (auto &&Pair : Privates) {
      auto *VD = Pair.second.Original;
      auto Type = VD->getType();
      // Store references by value in the privates record.
      Type = Type.getNonReferenceType();
      auto *FD = addFieldToRecordDecl(C, RD, Type);
      if (VD->hasAttrs()) {
        // Propagate alignment attributes so the field keeps the variable's
        // required alignment.
        for (specific_attr_iterator<AlignedAttr> I(VD->getAttrs().begin()),
             E(VD->getAttrs().end());
             I != E; ++I)
          FD->addAttr(*I);
      }
    }
    RD->completeDefinition();
    return RD;
  }
  return nullptr;
}

/// Build the implicit record for kmp_task_t (plus the kmp_cmplrdata_t
/// union); taskloop directives get five extra trailing fields.
static RecordDecl *
createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
                         QualType KmpInt32Ty,
                         QualType KmpRoutineEntryPointerQTy) {
  auto &C = CGM.getContext();
  // Build struct kmp_task_t {
  //         void *              shareds;
  //         kmp_routine_entry_t routine;
  //         kmp_int32           part_id;
  //         kmp_cmplrdata_t data1;
  //         kmp_cmplrdata_t data2;
  // For taskloops additional fields:
  //         kmp_uint64          lb;
  //         kmp_uint64          ub;
  //         kmp_int64           st;
  //         kmp_int32           liter;
  //         void *              reductions;
  //       };
  auto *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
  UD->startDefinition();
  addFieldToRecordDecl(C, UD, KmpInt32Ty);
  addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
  UD->completeDefinition();
  QualType KmpCmplrdataTy = C.getRecordType(UD);
  auto *RD = C.buildImplicitRecord("kmp_task_t");
  RD->startDefinition();
  addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  addFieldToRecordDecl(C, RD, KmpRoutineEntryPointerQTy);
  addFieldToRecordDecl(C, RD, KmpInt32Ty);
  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
  addFieldToRecordDecl(C, RD, KmpCmplrdataTy);
  if (isOpenMPTaskLoopDirective(Kind)) {
    QualType KmpUInt64Ty =
        CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
    QualType KmpInt64Ty =
        CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
    addFieldToRecordDecl(C, RD, KmpUInt64Ty);
    addFieldToRecordDecl(C, RD, KmpUInt64Ty);
    addFieldToRecordDecl(C, RD, KmpInt64Ty);
    addFieldToRecordDecl(C, RD, KmpInt32Ty);
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  }
  RD->completeDefinition();
  return RD;
}

/// Build the implicit record that wraps kmp_task_t together with the
/// (optional) privates record, so both travel in one runtime allocation.
static RecordDecl *
createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
                                     ArrayRef<PrivateDataTy> Privates) {
  auto &C = CGM.getContext();
  // Build struct kmp_task_t_with_privates {
  //         kmp_task_t task_data;
  //         .kmp_privates_t. privates;
  //       };
  auto *RD = C.buildImplicitRecord("kmp_task_t_with_privates");
  RD->startDefinition();
  addFieldToRecordDecl(C, RD, KmpTaskTQTy);
  // The privates field is only present when the task actually has privates.
  if (auto *PrivateRD = createPrivatesRecordDecl(CGM, Privates)) {
    addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
  }
  RD->completeDefinition();
  return RD;
}

/// \brief Emit a proxy function which accepts kmp_task_t as the second
/// argument.
/// \code
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, &tt->privates, task_privates_map, tt,
///   For taskloops:
///   tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
///   tt->reductions, tt->shareds);
///   return 0;
/// }
/// \endcode
static llvm::Value *
emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
                      OpenMPDirectiveKind Kind, QualType KmpInt32Ty,
                      QualType KmpTaskTWithPrivatesPtrQTy,
                      QualType KmpTaskTWithPrivatesQTy, QualType KmpTaskTQTy,
                      QualType SharedsPtrTy, llvm::Value *TaskFunction,
                      llvm::Value *TaskPrivatesMap) {
  auto &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  auto &TaskEntryFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty, Args);
  auto *TaskEntryTy = CGM.getTypes().GetFunctionType(TaskEntryFnInfo);
  auto *TaskEntry =
      llvm::Function::Create(TaskEntryTy, llvm::GlobalValue::InternalLinkage,
                             ".omp_task_entry.", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskEntry, TaskEntryFnInfo);
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), KmpInt32Ty, TaskEntry, TaskEntryFnInfo, Args,
                    Loc, Loc);

  // TaskFunction(gtid, tt->task_data.part_id, &tt->privates, task_privates_map,
  // tt,
  // For taskloops:
  // tt->task_data.lb, tt->task_data.ub, tt->task_data.st, tt->task_data.liter,
  // tt->task_data.shareds);
  auto *GtidParam = CGF.EmitLoadOfScalar(
      CGF.GetAddrOfLocalVar(&GtidArg), /*Volatile=*/false, KmpInt32Ty, Loc);
  // Base of the whole task record, then the inner kmp_task_t field.
  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&TaskTypeArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  auto *KmpTaskTWithPrivatesQTyRD =
      cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl());
  LValue Base =
      CGF.EmitLValueForField(TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
  auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
  auto PartIdLVal = CGF.EmitLValueForField(Base, *PartIdFI);
  auto *PartidParam = PartIdLVal.getPointer();

  auto SharedsFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds);
  auto SharedsLVal = CGF.EmitLValueForField(Base, *SharedsFI);
  auto *SharedsParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.EmitLoadOfLValue(SharedsLVal, Loc).getScalarVal(),
      CGF.ConvertTypeForMem(SharedsPtrTy));

  // The privates field (index 1) only exists if the task has privates; pass
  // a null pointer otherwise.
  auto PrivatesFI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin(), 1);
  llvm::Value *PrivatesParam;
  if (PrivatesFI != KmpTaskTWithPrivatesQTyRD->field_end()) {
    auto PrivatesLVal = CGF.EmitLValueForField(TDBase, *PrivatesFI);
    PrivatesParam = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        PrivatesLVal.getPointer(), CGF.VoidPtrTy);
  } else
    PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);

  llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
                               TaskPrivatesMap,
                               CGF.Builder
                                   .CreatePointerBitCastOrAddrSpaceCast(
                                       TDBase.getAddress(), CGF.VoidPtrTy)
                                   .getPointer()};
  SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
                                          std::end(CommonArgs));
  if (isOpenMPTaskLoopDirective(Kind)) {
    // Taskloops pass the loop bounds/stride/lastiter/reductions loaded from
    // the kmp_task_t fields as extra arguments.
    auto LBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound);
    auto LBLVal = CGF.EmitLValueForField(Base, *LBFI);
    auto *LBParam = CGF.EmitLoadOfLValue(LBLVal, Loc).getScalarVal();
    auto UBFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound);
    auto UBLVal = CGF.EmitLValueForField(Base, *UBFI);
    auto *UBParam = CGF.EmitLoadOfLValue(UBLVal, Loc).getScalarVal();
    auto StFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTStride);
    auto StLVal = CGF.EmitLValueForField(Base, *StFI);
    auto *StParam = CGF.EmitLoadOfLValue(StLVal, Loc).getScalarVal();
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    auto LILVal = CGF.EmitLValueForField(Base, *LIFI);
    auto *LIParam = CGF.EmitLoadOfLValue(LILVal, Loc).getScalarVal();
    auto RFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTReductions);
    auto RLVal = CGF.EmitLValueForField(Base, *RFI);
    auto *RParam = CGF.EmitLoadOfLValue(RLVal, Loc).getScalarVal();
    CallArgs.push_back(LBParam);
    CallArgs.push_back(UBParam);
    CallArgs.push_back(StParam);
    CallArgs.push_back(LIParam);
    CallArgs.push_back(RParam);
  }
  CallArgs.push_back(SharedsParam);

  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskFunction,
                                                  CallArgs);
  // The runtime expects the entry point to return 0.
  CGF.EmitStoreThroughLValue(
      RValue::get(CGF.Builder.getInt32(/*C=*/0)),
      CGF.MakeAddrLValue(CGF.ReturnValue, KmpInt32Ty));
  CGF.FinishFunction();
  return TaskEntry;
}

static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
                                            SourceLocation Loc,
                                            QualType KmpInt32Ty,
                                            QualType KmpTaskTWithPrivatesPtrQTy,
                                            QualType KmpTaskTWithPrivatesQTy) {
  auto &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
                            ImplicitParamDecl::Other);
  ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                                KmpTaskTWithPrivatesPtrQTy.withRestrict(),
                                ImplicitParamDecl::Other);
  Args.push_back(&GtidArg);
  Args.push_back(&TaskTypeArg);
  auto &DestructorFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(KmpInt32Ty,
Args); 4052 auto *DestructorFnTy = CGM.getTypes().GetFunctionType(DestructorFnInfo); 4053 auto *DestructorFn = 4054 llvm::Function::Create(DestructorFnTy, llvm::GlobalValue::InternalLinkage, 4055 ".omp_task_destructor.", &CGM.getModule()); 4056 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, DestructorFn, 4057 DestructorFnInfo); 4058 CodeGenFunction CGF(CGM); 4059 CGF.StartFunction(GlobalDecl(), KmpInt32Ty, DestructorFn, DestructorFnInfo, 4060 Args, Loc, Loc); 4061 4062 LValue Base = CGF.EmitLoadOfPointerLValue( 4063 CGF.GetAddrOfLocalVar(&TaskTypeArg), 4064 KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>()); 4065 auto *KmpTaskTWithPrivatesQTyRD = 4066 cast<RecordDecl>(KmpTaskTWithPrivatesQTy->getAsTagDecl()); 4067 auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin()); 4068 Base = CGF.EmitLValueForField(Base, *FI); 4069 for (auto *Field : 4070 cast<RecordDecl>(FI->getType()->getAsTagDecl())->fields()) { 4071 if (auto DtorKind = Field->getType().isDestructedType()) { 4072 auto FieldLValue = CGF.EmitLValueForField(Base, Field); 4073 CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType()); 4074 } 4075 } 4076 CGF.FinishFunction(); 4077 return DestructorFn; 4078 } 4079 4080 /// \brief Emit a privates mapping function for correct handling of private and 4081 /// firstprivate variables. 4082 /// \code 4083 /// void .omp_task_privates_map.(const .privates. 
*noalias privs, <ty1> 4084 /// **noalias priv1,..., <tyn> **noalias privn) { 4085 /// *priv1 = &.privates.priv1; 4086 /// ...; 4087 /// *privn = &.privates.privn; 4088 /// } 4089 /// \endcode 4090 static llvm::Value * 4091 emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc, 4092 ArrayRef<const Expr *> PrivateVars, 4093 ArrayRef<const Expr *> FirstprivateVars, 4094 ArrayRef<const Expr *> LastprivateVars, 4095 QualType PrivatesQTy, 4096 ArrayRef<PrivateDataTy> Privates) { 4097 auto &C = CGM.getContext(); 4098 FunctionArgList Args; 4099 ImplicitParamDecl TaskPrivatesArg( 4100 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 4101 C.getPointerType(PrivatesQTy).withConst().withRestrict(), 4102 ImplicitParamDecl::Other); 4103 Args.push_back(&TaskPrivatesArg); 4104 llvm::DenseMap<const VarDecl *, unsigned> PrivateVarsPos; 4105 unsigned Counter = 1; 4106 for (auto *E: PrivateVars) { 4107 Args.push_back(ImplicitParamDecl::Create( 4108 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 4109 C.getPointerType(C.getPointerType(E->getType())) 4110 .withConst() 4111 .withRestrict(), 4112 ImplicitParamDecl::Other)); 4113 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4114 PrivateVarsPos[VD] = Counter; 4115 ++Counter; 4116 } 4117 for (auto *E : FirstprivateVars) { 4118 Args.push_back(ImplicitParamDecl::Create( 4119 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 4120 C.getPointerType(C.getPointerType(E->getType())) 4121 .withConst() 4122 .withRestrict(), 4123 ImplicitParamDecl::Other)); 4124 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4125 PrivateVarsPos[VD] = Counter; 4126 ++Counter; 4127 } 4128 for (auto *E: LastprivateVars) { 4129 Args.push_back(ImplicitParamDecl::Create( 4130 C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, 4131 C.getPointerType(C.getPointerType(E->getType())) 4132 .withConst() 4133 .withRestrict(), 4134 ImplicitParamDecl::Other)); 4135 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()); 4136 PrivateVarsPos[VD] = Counter; 4137 ++Counter; 4138 } 
4139 auto &TaskPrivatesMapFnInfo = 4140 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 4141 auto *TaskPrivatesMapTy = 4142 CGM.getTypes().GetFunctionType(TaskPrivatesMapFnInfo); 4143 auto *TaskPrivatesMap = llvm::Function::Create( 4144 TaskPrivatesMapTy, llvm::GlobalValue::InternalLinkage, 4145 ".omp_task_privates_map.", &CGM.getModule()); 4146 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap, 4147 TaskPrivatesMapFnInfo); 4148 TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline); 4149 TaskPrivatesMap->removeFnAttr(llvm::Attribute::OptimizeNone); 4150 TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline); 4151 CodeGenFunction CGF(CGM); 4152 CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskPrivatesMap, 4153 TaskPrivatesMapFnInfo, Args, Loc, Loc); 4154 4155 // *privi = &.privates.privi; 4156 LValue Base = CGF.EmitLoadOfPointerLValue( 4157 CGF.GetAddrOfLocalVar(&TaskPrivatesArg), 4158 TaskPrivatesArg.getType()->castAs<PointerType>()); 4159 auto *PrivatesQTyRD = cast<RecordDecl>(PrivatesQTy->getAsTagDecl()); 4160 Counter = 0; 4161 for (auto *Field : PrivatesQTyRD->fields()) { 4162 auto FieldLVal = CGF.EmitLValueForField(Base, Field); 4163 auto *VD = Args[PrivateVarsPos[Privates[Counter].second.Original]]; 4164 auto RefLVal = CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType()); 4165 auto RefLoadLVal = CGF.EmitLoadOfPointerLValue( 4166 RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>()); 4167 CGF.EmitStoreOfScalar(FieldLVal.getPointer(), RefLoadLVal); 4168 ++Counter; 4169 } 4170 CGF.FinishFunction(); 4171 return TaskPrivatesMap; 4172 } 4173 4174 static bool stable_sort_comparator(const PrivateDataTy P1, 4175 const PrivateDataTy P2) { 4176 return P1.first > P2.first; 4177 } 4178 4179 /// Emit initialization for private variables in task-based directives. 
/// Emit initialization for the private copies stored in a task's privates
/// record.
/// \param KmpTaskSharedsPtr Address of the shareds area used as the
///        initialization source for firstprivates (may be invalid).
/// \param TDBase LValue of the kmp_task_t_with_privates record to fill.
/// \param ForDup true when emitting inside the task-dup function: only
///        non-trivial C++ construction is (re)run there; trivial inits were
///        already copied.
static void emitPrivatesInit(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             Address KmpTaskSharedsPtr, LValue TDBase,
                             const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                             QualType SharedsTy, QualType SharedsPtrTy,
                             const OMPTaskDataTy &Data,
                             ArrayRef<PrivateDataTy> Privates, bool ForDup) {
  auto &C = CGF.getContext();
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  LValue PrivatesBase = CGF.EmitLValueForField(TDBase, *FI);
  OpenMPDirectiveKind Kind = isOpenMPTaskLoopDirective(D.getDirectiveKind())
                                 ? OMPD_taskloop
                                 : OMPD_task;
  const CapturedStmt &CS = *D.getCapturedStmt(Kind);
  CodeGenFunction::CGCapturedStmtInfo CapturesInfo(CS);
  LValue SrcBase;
  bool IsTargetTask =
      isOpenMPTargetDataManagementDirective(D.getDirectiveKind()) ||
      isOpenMPTargetExecutionDirective(D.getDirectiveKind());
  // For target-based directives skip 3 firstprivate arrays BasePointersArray,
  // PointersArray and SizesArray. The original variables for these arrays are
  // not captured and we get their addresses explicitly.
  if ((!IsTargetTask && !Data.FirstprivateVars.empty()) ||
      (IsTargetTask && KmpTaskSharedsPtr.isValid())) {
    SrcBase = CGF.MakeAddrLValue(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
        SharedsTy);
  }
  // Walk the fields of the privates record in lockstep with Privates (both
  // follow the same alignment-sorted order).
  FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
  for (auto &&Pair : Privates) {
    auto *VD = Pair.second.PrivateCopy;
    auto *Init = VD->getAnyInitializer();
    if (Init && (!ForDup || (isa<CXXConstructExpr>(Init) &&
                             !CGF.isTrivialInitializer(Init)))) {
      LValue PrivateLValue = CGF.EmitLValueForField(PrivatesBase, *FI);
      if (auto *Elem = Pair.second.PrivateElemInit) {
        // Firstprivate: initialize from the shared copy.
        auto *OriginalVD = Pair.second.Original;
        // Check if the variable is the target-based BasePointersArray,
        // PointersArray or SizesArray.
        LValue SharedRefLValue;
        QualType Type = OriginalVD->getType();
        auto *SharedField = CapturesInfo.lookup(OriginalVD);
        if (IsTargetTask && !SharedField) {
          assert(isa<ImplicitParamDecl>(OriginalVD) &&
                 isa<CapturedDecl>(OriginalVD->getDeclContext()) &&
                 cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getNumParams() == 0 &&
                 isa<TranslationUnitDecl>(
                     cast<CapturedDecl>(OriginalVD->getDeclContext())
                         ->getDeclContext()) &&
                 "Expected artificial target data variable.");
          SharedRefLValue =
              CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(OriginalVD), Type);
        } else {
          SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
          // Re-wrap with the original declaration's alignment (the field
          // lvalue carries the record layout's alignment, not the decl's).
          SharedRefLValue = CGF.MakeAddrLValue(
              Address(SharedRefLValue.getPointer(), C.getDeclAlign(OriginalVD)),
              SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
              SharedRefLValue.getTBAAInfo());
        }
        if (Type->isArrayType()) {
          // Initialize firstprivate array.
          if (!isa<CXXConstructExpr>(Init) || CGF.isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            CGF.EmitAggregateAssign(PrivateLValue.getAddress(),
                                    SharedRefLValue.getAddress(), Type);
          } else {
            // Initialize firstprivate array using element-by-element
            // initialization.
            CGF.EmitOMPAggregateAssign(
                PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
                [&CGF, Elem, Init, &CapturesInfo](Address DestElement,
                                                  Address SrcElement) {
                  // Clean up any temporaries needed by the initialization.
                  CodeGenFunction::OMPPrivateScope InitScope(CGF);
                  InitScope.addPrivate(
                      Elem, [SrcElement]() -> Address { return SrcElement; });
                  (void)InitScope.Privatize();
                  // Emit initialization for single element.
                  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
                      CGF, &CapturesInfo);
                  CGF.EmitAnyExprToMem(Init, DestElement,
                                       Init->getType().getQualifiers(),
                                       /*IsInitializer=*/false);
                });
          }
        } else {
          // Scalar/class firstprivate: bind Elem to the shared copy so the
          // stored initializer expression reads from it.
          CodeGenFunction::OMPPrivateScope InitScope(CGF);
          InitScope.addPrivate(Elem, [SharedRefLValue]() -> Address {
            return SharedRefLValue.getAddress();
          });
          (void)InitScope.Privatize();
          CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
          CGF.EmitExprAsInit(Init, VD, PrivateLValue,
                             /*capturedByInit=*/false);
        }
      } else
        // Plain private/lastprivate copy: default-initialize.
        CGF.EmitExprAsInit(Init, VD, PrivateLValue, /*capturedByInit=*/false);
    }
    ++FI;
  }
}

/// Check whether any private copy has a non-trivial C++ constructor
/// initializer, i.e. whether the taskloop duplication function must rerun
/// initialization for duplicated tasks.
static bool checkInitIsRequired(CodeGenFunction &CGF,
                                ArrayRef<PrivateDataTy> Privates) {
  bool InitRequired = false;
  for (auto &&Pair : Privates) {
    auto *VD = Pair.second.PrivateCopy;
    auto *Init = VD->getAnyInitializer();
    InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
                                    !CGF.isTrivialInitializer(Init));
  }
  return InitRequired;
}


/// Emit task_dup function (for initialization of
/// private/firstprivate/lastprivate vars and last_iter flag)
/// \code
/// void __task_dup_entry(kmp_task_t *task_dst, const kmp_task_t *task_src, int
/// lastpriv) {
///   // setup lastprivate flag
///   task_dst->last = lastpriv;
///   // could be constructor calls here...
/// }
/// \endcode
static llvm::Value *
emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
                    const OMPExecutableDirective &D,
                    QualType KmpTaskTWithPrivatesPtrQTy,
                    const RecordDecl *KmpTaskTWithPrivatesQTyRD,
                    const RecordDecl *KmpTaskTQTyRD, QualType SharedsTy,
                    QualType SharedsPtrTy, const OMPTaskDataTy &Data,
                    ArrayRef<PrivateDataTy> Privates, bool WithLastIter) {
  auto &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                           KmpTaskTWithPrivatesPtrQTy,
                           ImplicitParamDecl::Other);
  ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
                                ImplicitParamDecl::Other);
  Args.push_back(&DstArg);
  Args.push_back(&SrcArg);
  Args.push_back(&LastprivArg);
  auto &TaskDupFnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *TaskDupTy = CGM.getTypes().GetFunctionType(TaskDupFnInfo);
  auto *TaskDup =
      llvm::Function::Create(TaskDupTy, llvm::GlobalValue::InternalLinkage,
                             ".omp_task_dup.", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskDup, TaskDupFnInfo);
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, TaskDup, TaskDupFnInfo, Args, Loc,
                    Loc);

  LValue TDBase = CGF.EmitLoadOfPointerLValue(
      CGF.GetAddrOfLocalVar(&DstArg),
      KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
  // task_dst->liter = lastpriv;
  if (WithLastIter) {
    auto LIFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTLastIter);
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    LValue LILVal = CGF.EmitLValueForField(Base, *LIFI);
    llvm::Value *Lastpriv = CGF.EmitLoadOfScalar(
        CGF.GetAddrOfLocalVar(&LastprivArg), /*Volatile=*/false, C.IntTy, Loc);
    CGF.EmitStoreOfScalar(Lastpriv, LILVal);
  }

  // Emit initial values for private copies (if any).
  assert(!Privates.empty());
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!Data.FirstprivateVars.empty()) {
    // Firstprivates are initialized from the *source* task's shareds.
    LValue TDBase = CGF.EmitLoadOfPointerLValue(
        CGF.GetAddrOfLocalVar(&SrcArg),
        KmpTaskTWithPrivatesPtrQTy->castAs<PointerType>());
    LValue Base = CGF.EmitLValueForField(
        TDBase, *KmpTaskTWithPrivatesQTyRD->field_begin());
    KmpTaskSharedsPtr = Address(
        CGF.EmitLoadOfScalar(CGF.EmitLValueForField(
                                 Base, *std::next(KmpTaskTQTyRD->field_begin(),
                                                  KmpTaskTShareds)),
                             Loc),
        CGF.getNaturalTypeAlignment(SharedsTy));
  }
  emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
                   SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
  CGF.FinishFunction();
  return TaskDup;
}

/// Checks if destructor function is required to be generated.
/// \return true if cleanups are required, false otherwise.
static bool
checkDestructorsRequired(const RecordDecl *KmpTaskTWithPrivatesQTyRD) {
  bool NeedsCleanup = false;
  auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
  auto *PrivateRD = cast<RecordDecl>(FI->getType()->getAsTagDecl());
  for (auto *FD : PrivateRD->fields()) {
    NeedsCleanup = NeedsCleanup || FD->getType().isDestructedType();
    if (NeedsCleanup)
      break;
  }
  return NeedsCleanup;
}

CGOpenMPRuntime::TaskResultTy
CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                              const OMPExecutableDirective &D,
                              llvm::Value *TaskFunction, QualType SharedsTy,
                              Address Shareds, const OMPTaskDataTy &Data) {
  auto &C = CGM.getContext();
  llvm::SmallVector<PrivateDataTy, 4> Privates;
  // Aggregate privates and sort them by the alignment.
  auto I = Data.PrivateCopies.begin();
  for (auto *E : Data.PrivateVars) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.push_back(std::make_pair(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr)));
    ++I;
  }
  I = Data.FirstprivateCopies.begin();
  auto IElemInitRef = Data.FirstprivateInits.begin();
  for (auto *E : Data.FirstprivateVars) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.push_back(std::make_pair(
        C.getDeclAlign(VD),
        PrivateHelpersTy(
            VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
            cast<VarDecl>(cast<DeclRefExpr>(*IElemInitRef)->getDecl()))));
    ++I;
    ++IElemInitRef;
  }
  I = Data.LastprivateCopies.begin();
  for (auto *E : Data.LastprivateVars) {
    auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
    Privates.push_back(std::make_pair(
        C.getDeclAlign(VD),
        PrivateHelpersTy(VD, cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl()),
                         /*PrivateElemInit=*/nullptr)));
    ++I;
  }
  // Descending alignment order; stable so equal-alignment entries keep
  // clause order (must match the field order produced for the record).
  std::stable_sort(Privates.begin(), Privates.end(), stable_sort_comparator);
  auto KmpInt32Ty = C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
  // Build type kmp_routine_entry_t (if not built yet).
  emitKmpRoutineEntryT(KmpInt32Ty);
  // Build type kmp_task_t (if not built yet). Taskloops use an extended
  // record (with bounds/stride/liter/reductions), cached separately.
  if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
    if (SavedKmpTaskloopTQTy.isNull()) {
      SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskloopTQTy;
  } else {
    assert((D.getDirectiveKind() == OMPD_task ||
            isOpenMPTargetExecutionDirective(D.getDirectiveKind()) ||
            isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
           "Expected taskloop, task or target directive");
    if (SavedKmpTaskTQTy.isNull()) {
      SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
          CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
    }
    KmpTaskTQTy = SavedKmpTaskTQTy;
  }
  auto *KmpTaskTQTyRD = cast<RecordDecl>(KmpTaskTQTy->getAsTagDecl());
  // Build particular struct kmp_task_t for the given task.
  auto *KmpTaskTWithPrivatesQTyRD =
      createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
  auto KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
  QualType KmpTaskTWithPrivatesPtrQTy =
      C.getPointerType(KmpTaskTWithPrivatesQTy);
  auto *KmpTaskTWithPrivatesTy = CGF.ConvertType(KmpTaskTWithPrivatesQTy);
  auto *KmpTaskTWithPrivatesPtrTy = KmpTaskTWithPrivatesTy->getPointerTo();
  auto *KmpTaskTWithPrivatesTySize = CGF.getTypeSize(KmpTaskTWithPrivatesQTy);
  QualType SharedsPtrTy = C.getPointerType(SharedsTy);

  // Emit initial values for private copies (if any).
  llvm::Value *TaskPrivatesMap = nullptr;
  // The mapper's type is taken from TaskFunction's 4th parameter so the cast
  // below always matches the outlined function's expectation.
  auto *TaskPrivatesMapTy =
      std::next(cast<llvm::Function>(TaskFunction)->arg_begin(), 3)->getType();
  if (!Privates.empty()) {
    auto FI = std::next(KmpTaskTWithPrivatesQTyRD->field_begin());
    TaskPrivatesMap = emitTaskPrivateMappingFunction(
        CGM, Loc, Data.PrivateVars, Data.FirstprivateVars, Data.LastprivateVars,
        FI->getType(), Privates);
    TaskPrivatesMap = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TaskPrivatesMap, TaskPrivatesMapTy);
  } else {
    TaskPrivatesMap = llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(TaskPrivatesMapTy));
  }
  // Build a proxy function kmp_int32 .omp_task_entry.(kmp_int32 gtid,
  // kmp_task_t *tt);
  auto *TaskEntry = emitProxyTaskFunction(
      CGM, Loc, D.getDirectiveKind(), KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
      KmpTaskTWithPrivatesQTy, KmpTaskTQTy, SharedsPtrTy, TaskFunction,
      TaskPrivatesMap);

  // Build call kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  // kmp_routine_entry_t *task_entry);
  // Task flags. Format is taken from
  // http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h,
  // description of kmp_tasking_flags struct.
  enum {
    TiedFlag = 0x1,
    FinalFlag = 0x2,
    DestructorsFlag = 0x8,
    PriorityFlag = 0x20
  };
  unsigned Flags = Data.Tied ? TiedFlag : 0;
  bool NeedsCleanup = false;
  if (!Privates.empty()) {
    NeedsCleanup = checkDestructorsRequired(KmpTaskTWithPrivatesQTyRD);
    if (NeedsCleanup)
      Flags = Flags | DestructorsFlag;
  }
  if (Data.Priority.getInt())
    Flags = Flags | PriorityFlag;
  // final clause: a non-null pointer means a runtime condition (select at
  // run time); otherwise the compile-time boolean in getInt() decides.
  auto *TaskFlags =
      Data.Final.getPointer()
          ? CGF.Builder.CreateSelect(Data.Final.getPointer(),
                                     CGF.Builder.getInt32(FinalFlag),
                                     CGF.Builder.getInt32(/*C=*/0))
          : CGF.Builder.getInt32(Data.Final.getInt() ? FinalFlag : 0);
  TaskFlags = CGF.Builder.CreateOr(TaskFlags, CGF.Builder.getInt32(Flags));
  auto *SharedsSize = CGM.getSize(C.getTypeSizeInChars(SharedsTy));
  llvm::Value *AllocArgs[] = {emitUpdateLocation(CGF, Loc),
                              getThreadID(CGF, Loc), TaskFlags,
                              KmpTaskTWithPrivatesTySize, SharedsSize,
                              CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                                  TaskEntry, KmpRoutineEntryPtrTy)};
  auto *NewTask = CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_omp_task_alloc), AllocArgs);
  auto *NewTaskNewTaskTTy = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      NewTask, KmpTaskTWithPrivatesPtrTy);
  LValue Base = CGF.MakeNaturalAlignAddrLValue(NewTaskNewTaskTTy,
                                               KmpTaskTWithPrivatesQTy);
  LValue TDBase =
      CGF.EmitLValueForField(Base, *KmpTaskTWithPrivatesQTyRD->field_begin());
  // Fill the data in the resulting kmp_task_t record.
  // Copy shareds if there are any.
  Address KmpTaskSharedsPtr = Address::invalid();
  if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
    KmpTaskSharedsPtr =
        Address(CGF.EmitLoadOfScalar(
                    CGF.EmitLValueForField(
                        TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
                                           KmpTaskTShareds)),
                    Loc),
                CGF.getNaturalTypeAlignment(SharedsTy));
    CGF.EmitAggregateCopy(KmpTaskSharedsPtr, Shareds, SharedsTy);
  }
  // Emit initial values for private copies (if any).
  TaskResultTy Result;
  if (!Privates.empty()) {
    emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, Base, KmpTaskTWithPrivatesQTyRD,
                     SharedsTy, SharedsPtrTy, Data, Privates,
                     /*ForDup=*/false);
    // Taskloops also need a dup function when duplicated chunks must rerun
    // lastprivate bookkeeping or non-trivial construction.
    if (isOpenMPTaskLoopDirective(D.getDirectiveKind()) &&
        (!Data.LastprivateVars.empty() || checkInitIsRequired(CGF, Privates))) {
      Result.TaskDupFn = emitTaskDupFunction(
          CGM, Loc, D, KmpTaskTWithPrivatesPtrQTy, KmpTaskTWithPrivatesQTyRD,
          KmpTaskTQTyRD, SharedsTy, SharedsPtrTy, Data, Privates,
          /*WithLastIter=*/!Data.LastprivateVars.empty());
    }
  }
  // Fields of union "kmp_cmplrdata_t" for destructors and priority.
  enum { Priority = 0, Destructors = 1 };
  // Provide pointer to function with destructors for privates.
  auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
  auto *KmpCmplrdataUD = (*FI)->getType()->getAsUnionType()->getDecl();
  if (NeedsCleanup) {
    llvm::Value *DestructorFn = emitDestructorsFunction(
        CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
        KmpTaskTWithPrivatesQTy);
    LValue Data1LV = CGF.EmitLValueForField(TDBase, *FI);
    LValue DestructorsLV = CGF.EmitLValueForField(
        Data1LV, *std::next(KmpCmplrdataUD->field_begin(), Destructors));
    CGF.EmitStoreOfScalar(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                              DestructorFn, KmpRoutineEntryPtrTy),
                          DestructorsLV);
  }
  // Set priority.
  if (Data.Priority.getInt()) {
    LValue Data2LV = CGF.EmitLValueForField(
        TDBase, *std::next(KmpTaskTQTyRD->field_begin(), Data2));
    LValue PriorityLV = CGF.EmitLValueForField(
        Data2LV, *std::next(KmpCmplrdataUD->field_begin(), Priority));
    CGF.EmitStoreOfScalar(Data.Priority.getPointer(), PriorityLV);
  }
  Result.NewTask = NewTask;
  Result.TaskEntry = TaskEntry;
  Result.NewTaskNewTaskTTy = NewTaskNewTaskTTy;
  Result.TDBase = TDBase;
  Result.KmpTaskTQTyRD = KmpTaskTQTyRD;
  return Result;
}

void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                   const OMPExecutableDirective &D,
                                   llvm::Value *TaskFunction,
                                   QualType SharedsTy, Address Shareds,
                                   const Expr *IfCond,
                                   const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;

  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  llvm::Value *NewTask = Result.NewTask;
  llvm::Value *TaskEntry = Result.TaskEntry;
  llvm::Value *NewTaskNewTaskTTy = Result.NewTaskNewTaskTTy;
  LValue TDBase = Result.TDBase;
  RecordDecl *KmpTaskTQTyRD = Result.KmpTaskTQTyRD;
  auto &C = CGM.getContext();
  // Process list of dependences.
  Address DependenciesArray = Address::invalid();
  unsigned NumDependencies = Data.Dependences.size();
  if (NumDependencies) {
    // Dependence kind for RTL.
    enum RTLDependenceKindTy { DepIn = 0x01, DepInOut = 0x3 };
    enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
    RecordDecl *KmpDependInfoRD;
    QualType FlagsTy =
        C.getIntTypeForBitwidth(C.getTypeSize(C.BoolTy), /*Signed=*/false);
    llvm::Type *LLVMFlagsTy = CGF.ConvertTypeForMem(FlagsTy);
    // Lazily build the implicit kmp_depend_info record
    // { intptr_t base_addr; size_t len; flags }.
    if (KmpDependInfoTy.isNull()) {
      KmpDependInfoRD = C.buildImplicitRecord("kmp_depend_info");
      KmpDependInfoRD->startDefinition();
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getIntPtrType());
      addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
      addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
      KmpDependInfoRD->completeDefinition();
      KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
    } else
      KmpDependInfoRD = cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
    CharUnits DependencySize = C.getTypeSizeInChars(KmpDependInfoTy);
    // Define type kmp_depend_info[<Dependences.size()>];
    QualType KmpDependInfoArrayTy = C.getConstantArrayType(
        KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies),
        ArrayType::Normal, /*IndexTypeQuals=*/0);
    // kmp_depend_info[<Dependences.size()>] deps;
    DependenciesArray =
        CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
    for (unsigned i = 0; i < NumDependencies; ++i) {
      const Expr *E = Data.Dependences[i].second;
      auto Addr = CGF.EmitLValue(E);
      llvm::Value *Size;
      QualType Ty = E->getType();
      if (auto *ASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
        // Array section: size = (&upper)+1 - &lower, in bytes.
        LValue UpAddrLVal =
            CGF.EmitOMPArraySectionExpr(ASE, /*LowerBound=*/false);
        llvm::Value *UpAddr =
            CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(), /*Idx0=*/1);
        llvm::Value *LowIntPtr =
            CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGM.SizeTy);
        llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGM.SizeTy);
        Size = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
      } else
        Size = CGF.getTypeSize(Ty);
      auto Base = CGF.MakeAddrLValue(
          CGF.Builder.CreateConstArrayGEP(DependenciesArray, i, DependencySize),
          KmpDependInfoTy);
      // deps[i].base_addr = &<Dependences[i].second>;
      auto BaseAddrLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
      CGF.EmitStoreOfScalar(
          CGF.Builder.CreatePtrToInt(Addr.getPointer(), CGF.IntPtrTy),
          BaseAddrLVal);
      // deps[i].len = sizeof(<Dependences[i].second>);
      auto LenLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Len));
      CGF.EmitStoreOfScalar(Size, LenLVal);
      // deps[i].flags = <Dependences[i].first>;
      RTLDependenceKindTy DepKind;
      switch (Data.Dependences[i].first) {
      case OMPC_DEPEND_in:
        DepKind = DepIn;
        break;
      // Out and InOut dependencies must use the same code.
      case OMPC_DEPEND_out:
      case OMPC_DEPEND_inout:
        DepKind = DepInOut;
        break;
      case OMPC_DEPEND_source:
      case OMPC_DEPEND_sink:
      case OMPC_DEPEND_unknown:
        llvm_unreachable("Unknown task dependence type");
      }
      auto FlagsLVal = CGF.EmitLValueForField(
          Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
                            FlagsLVal);
    }
    DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        CGF.Builder.CreateStructGEP(DependenciesArray, 0, CharUnits::Zero()),
        CGF.VoidPtrTy);
  }

  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Build kmp_int32 __kmpc_omp_task_with_deps(ident_t *, kmp_int32 gtid,
  // kmp_task_t *new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
  // kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list) if dependence
  // list is not empty
  auto *ThreadID = getThreadID(CGF, Loc);
  auto *UpLoc = emitUpdateLocation(CGF, Loc);
  llvm::Value *TaskArgs[] = { UpLoc, ThreadID, NewTask };
  llvm::Value *DepTaskArgs[7];
  if (NumDependencies) {
    DepTaskArgs[0] = UpLoc;
    DepTaskArgs[1] = ThreadID;
    DepTaskArgs[2] = NewTask;
    DepTaskArgs[3] = CGF.Builder.getInt32(NumDependencies);
    DepTaskArgs[4] = DependenciesArray.getPointer();
    DepTaskArgs[5] = CGF.Builder.getInt32(0);
    DepTaskArgs[6] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  // "then" branch: if(<cond>) is absent or true — enqueue the task normally.
  auto &&ThenCodeGen = [this, &Data, TDBase, KmpTaskTQTyRD, NumDependencies,
                        &TaskArgs,
                        &DepTaskArgs](CodeGenFunction &CGF, PrePostActionTy &) {
    if (!Data.Tied) {
      auto PartIdFI = std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTPartId);
      auto PartIdLVal = CGF.EmitLValueForField(TDBase, *PartIdFI);
      CGF.EmitStoreOfScalar(CGF.Builder.getInt32(0), PartIdLVal);
    }
    if (NumDependencies) {
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_omp_task_with_deps), DepTaskArgs);
    } else {
      CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_task),
                          TaskArgs);
    }
    // Check if parent region is untied and build return for untied task;
    if (auto *Region =
            dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
      Region->emitUntiedSwitch(CGF);
  };

  llvm::Value *DepWaitTaskArgs[6];
  if (NumDependencies) {
    DepWaitTaskArgs[0] = UpLoc;
    DepWaitTaskArgs[1] = ThreadID;
    DepWaitTaskArgs[2] = CGF.Builder.getInt32(NumDependencies);
    DepWaitTaskArgs[3] = DependenciesArray.getPointer();
    DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
    DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
  }
  // "else" branch: if(<cond>) is false — undeferred task executed immediately
  // on the encountering thread, bracketed by begin_if0/complete_if0.
  auto &&ElseCodeGen = [&TaskArgs, ThreadID, NewTaskNewTaskTTy, TaskEntry,
                        NumDependencies, &DepWaitTaskArgs,
                        Loc](CodeGenFunction &CGF, PrePostActionTy &) {
    auto &RT = CGF.CGM.getOpenMPRuntime();
    CodeGenFunction::RunCleanupsScope LocalScope(CGF);
    // Build void __kmpc_omp_wait_deps(ident_t *, kmp_int32 gtid,
    // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
    // is specified.
    if (NumDependencies)
      CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__kmpc_omp_wait_deps),
                          DepWaitTaskArgs);
    // Call proxy_task_entry(gtid, new_task);
    auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
                      Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
      Action.Enter(CGF);
      llvm::Value *OutlinedFnArgs[] = {ThreadID, NewTaskNewTaskTTy};
      CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, Loc, TaskEntry,
                                                          OutlinedFnArgs);
    };

    // Build void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    // Build void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
    // kmp_task_t *new_task);
    RegionCodeGenTy RCG(CodeGen);
    CommonActionTy Action(
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_begin_if0), TaskArgs,
        RT.createRuntimeFunction(OMPRTL__kmpc_omp_task_complete_if0), TaskArgs);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IfCond)
    emitOMPIfClause(CGF, IfCond, ThenCodeGen, ElseCodeGen);
  else {
    RegionCodeGenTy ThenRCG(ThenCodeGen);
    ThenRCG(CGF);
  }
}

void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPLoopDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint())
    return;
  TaskResultTy Result =
      emitTaskInit(CGF, Loc, D, TaskFunction, SharedsTy, Shareds, Data);
  // NOTE: routine and part_id fields are initialized by __kmpc_omp_task_alloc()
  // libcall.
  // Call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int
  // if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup, int
  // sched, kmp_uint64 grainsize, void *task_dup);
  llvm::Value *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
  // The 'if' clause becomes the runtime's if_val argument (1 when absent).
  llvm::Value *IfVal;
  if (IfCond) {
    IfVal = CGF.Builder.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.IntTy,
                                      /*isSigned=*/true);
  } else
    IfVal = llvm::ConstantInt::getSigned(CGF.IntTy, /*V=*/1);

  // Store the lower bound, upper bound and stride initializers into the
  // corresponding fields of the task record.
  LValue LBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
  auto *LBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue UBLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
  auto *UBVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
  CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
                       /*IsInitializer=*/true);
  LValue StLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
  auto *StVar =
      cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
  CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
                       /*IsInitializer=*/true);
  // Store reductions address.
  LValue RedLVal = CGF.EmitLValueForField(
      Result.TDBase,
      *std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTReductions));
  if (Data.Reductions)
    CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
  else {
    CGF.EmitNullInitialization(RedLVal.getAddress(),
                               CGF.getContext().VoidPtrTy);
  }
  // Encoding of the 'sched' argument of __kmpc_taskloop.
  enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
  llvm::Value *TaskArgs[] = {
      UpLoc,
      ThreadID,
      Result.NewTask,
      IfVal,
      LBLVal.getPointer(),
      UBLVal.getPointer(),
      CGF.EmitLoadOfScalar(StLVal, SourceLocation()),
      llvm::ConstantInt::getNullValue(
          CGF.IntTy), // Always 0 because taskgroup emitted by the compiler
      llvm::ConstantInt::getSigned(
          CGF.IntTy, Data.Schedule.getPointer()
                         ? Data.Schedule.getInt() ? NumTasks : Grainsize
                         : NoSchedule),
      Data.Schedule.getPointer()
          ? CGF.Builder.CreateIntCast(Data.Schedule.getPointer(), CGF.Int64Ty,
                                      /*isSigned=*/false)
          : llvm::ConstantInt::get(CGF.Int64Ty, /*V=*/0),
      Result.TaskDupFn ? CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             Result.TaskDupFn, CGF.VoidPtrTy)
                       : llvm::ConstantPointerNull::get(CGF.VoidPtrTy)};
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_taskloop), TaskArgs);
}

/// \brief Emit reduction operation for each element of array (required for
/// array sections) LHS op = RHS.
/// \param Type Type of array.
/// \param LHSVar Variable on the left side of the reduction operation
/// (references element of array in original variable).
/// \param RHSVar Variable on the right side of the reduction operation
/// (references element of array in original variable).
/// \param RedOpGen Generator of reduction operation with use of LHSVar and
/// RHSVar.
static void EmitOMPAggregateReduction(
    CodeGenFunction &CGF, QualType Type, const VarDecl *LHSVar,
    const VarDecl *RHSVar,
    const llvm::function_ref<void(CodeGenFunction &CGF, const Expr *,
                                  const Expr *, const Expr *)> &RedOpGen,
    const Expr *XExpr = nullptr, const Expr *EExpr = nullptr,
    const Expr *UpExpr = nullptr) {
  // XExpr/EExpr/UpExpr are optional and simply forwarded to RedOpGen for each
  // element (used by the atomic-reduction path).
  // Perform element-by-element initialization.
  QualType ElementTy;
  Address LHSAddr = CGF.GetAddrOfLocalVar(LHSVar);
  Address RHSAddr = CGF.GetAddrOfLocalVar(RHSVar);

  // Drill down to the base element type on both arrays.
  auto ArrayTy = Type->getAsArrayTypeUnsafe();
  auto NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, LHSAddr);

  auto RHSBegin = RHSAddr.getPointer();
  auto LHSBegin = LHSAddr.getPointer();
  // Cast from pointer to array type to pointer to single element.
  auto LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
  // The basic structure here is a while-do loop.
  auto BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
  auto DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
  auto IsEmpty =
      CGF.Builder.CreateICmpEQ(LHSBegin, LHSEnd, "omp.arraycpy.isempty");
  CGF.Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  auto EntryBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(BodyBB);

  CharUnits ElementSize = CGF.getContext().getTypeSizeInChars(ElementTy);

  // PHI nodes carry the current source/destination element pointers across
  // loop iterations.
  llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
      RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  RHSElementPHI->addIncoming(RHSBegin, EntryBB);
  Address RHSElementCurrent =
      Address(RHSElementPHI,
              RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
      LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
  LHSElementPHI->addIncoming(LHSBegin, EntryBB);
  Address LHSElementCurrent =
      Address(LHSElementPHI,
              LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  // Temporarily remap LHSVar/RHSVar to the current element so RedOpGen
  // operates on a single element at a time.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  Scope.addPrivate(LHSVar, [=]() -> Address { return LHSElementCurrent; });
  Scope.addPrivate(RHSVar, [=]() -> Address { return RHSElementCurrent; });
  Scope.Privatize();
  RedOpGen(CGF, XExpr, EExpr, UpExpr);
  Scope.ForceCleanup();

  // Shift the address forward by one element.
  auto LHSElementNext = CGF.Builder.CreateConstGEP1_32(
      LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
  auto RHSElementNext = CGF.Builder.CreateConstGEP1_32(
      RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  auto Done =
      CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
  CGF.Builder.CreateCondBr(Done, DoneBB, BodyBB);
  LHSElementPHI->addIncoming(LHSElementNext, CGF.Builder.GetInsertBlock());
  RHSElementPHI->addIncoming(RHSElementNext, CGF.Builder.GetInsertBlock());

  // Done.
  CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}

/// Emit reduction combiner.
If the combiner is a simple expression emit it as 4938 /// is, otherwise consider it as combiner of UDR decl and emit it as a call of 4939 /// UDR combiner function. 4940 static void emitReductionCombiner(CodeGenFunction &CGF, 4941 const Expr *ReductionOp) { 4942 if (auto *CE = dyn_cast<CallExpr>(ReductionOp)) 4943 if (auto *OVE = dyn_cast<OpaqueValueExpr>(CE->getCallee())) 4944 if (auto *DRE = 4945 dyn_cast<DeclRefExpr>(OVE->getSourceExpr()->IgnoreImpCasts())) 4946 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(DRE->getDecl())) { 4947 std::pair<llvm::Function *, llvm::Function *> Reduction = 4948 CGF.CGM.getOpenMPRuntime().getUserDefinedReduction(DRD); 4949 RValue Func = RValue::get(Reduction.first); 4950 CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func); 4951 CGF.EmitIgnoredExpr(ReductionOp); 4952 return; 4953 } 4954 CGF.EmitIgnoredExpr(ReductionOp); 4955 } 4956 4957 llvm::Value *CGOpenMPRuntime::emitReductionFunction( 4958 CodeGenModule &CGM, SourceLocation Loc, llvm::Type *ArgsType, 4959 ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, 4960 ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) { 4961 auto &C = CGM.getContext(); 4962 4963 // void reduction_func(void *LHSArg, void *RHSArg); 4964 FunctionArgList Args; 4965 ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, 4966 ImplicitParamDecl::Other); 4967 ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy, 4968 ImplicitParamDecl::Other); 4969 Args.push_back(&LHSArg); 4970 Args.push_back(&RHSArg); 4971 auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args); 4972 auto *Fn = llvm::Function::Create( 4973 CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage, 4974 ".omp.reduction.reduction_func", &CGM.getModule()); 4975 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI); 4976 CodeGenFunction CGF(CGM); 4977 CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args, Loc, 
                    Loc);

  // Dst = (void*[n])(LHSArg);
  // Src = (void*[n])(RHSArg);
  Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
      ArgsType), CGF.getPointerAlign());
  Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
      ArgsType), CGF.getPointerAlign());

  // ...
  // *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  // ...
  // Remap each LHS/RHS variable onto the corresponding slot of the void*
  // arrays. Idx may run ahead of I: VLA items occupy an extra slot that holds
  // the array size.
  CodeGenFunction::OMPPrivateScope Scope(CGF);
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
    auto RHSVar = cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
    Scope.addPrivate(RHSVar, [&]() -> Address {
      return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
    });
    auto LHSVar = cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
    Scope.addPrivate(LHSVar, [&]() -> Address {
      return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
    });
    QualType PrivTy = (*IPriv)->getType();
    if (PrivTy->isVariablyModifiedType()) {
      // Get array size and emit VLA type.
      ++Idx;
      Address Elem =
          CGF.Builder.CreateConstArrayGEP(LHS, Idx, CGF.getPointerSize());
      llvm::Value *Ptr = CGF.Builder.CreateLoad(Elem);
      auto *VLA = CGF.getContext().getAsVariableArrayType(PrivTy);
      auto *OVE = cast<OpaqueValueExpr>(VLA->getSizeExpr());
      CodeGenFunction::OpaqueValueMapping OpaqueMap(
          CGF, OVE, RValue::get(CGF.Builder.CreatePtrToInt(Ptr, CGF.SizeTy)));
      CGF.EmitVariablyModifiedType(PrivTy);
    }
  }
  Scope.Privatize();
  // Emit one combiner per reduction item.
  IPriv = Privates.begin();
  auto ILHS = LHSExprs.begin();
  auto IRHS = RHSExprs.begin();
  for (auto *E : ReductionOps) {
    if ((*IPriv)->getType()->isArrayType()) {
      // Emit reduction for array section.
      auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
      auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
      EmitOMPAggregateReduction(
          CGF, (*IPriv)->getType(), LHSVar, RHSVar,
          [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
            emitReductionCombiner(CGF, E);
          });
    } else
      // Emit reduction for array subscript or single variable.
      emitReductionCombiner(CGF, E);
    ++IPriv;
    ++ILHS;
    ++IRHS;
  }
  Scope.ForceCleanup();
  CGF.FinishFunction();
  return Fn;
}

/// Emits the combiner for a single reduction item, dispatching to the
/// aggregate (element-wise) path for array-typed private items.
void CGOpenMPRuntime::emitSingleReductionCombiner(CodeGenFunction &CGF,
                                                  const Expr *ReductionOp,
                                                  const Expr *PrivateRef,
                                                  const DeclRefExpr *LHS,
                                                  const DeclRefExpr *RHS) {
  if (PrivateRef->getType()->isArrayType()) {
    // Emit reduction for array section.
    auto *LHSVar = cast<VarDecl>(LHS->getDecl());
    auto *RHSVar = cast<VarDecl>(RHS->getDecl());
    EmitOMPAggregateReduction(
        CGF, PrivateRef->getType(), LHSVar, RHSVar,
        [=](CodeGenFunction &CGF, const Expr *, const Expr *, const Expr *) {
          emitReductionCombiner(CGF, ReductionOp);
        });
  } else
    // Emit reduction for array subscript or single variable.
    emitReductionCombiner(CGF, ReductionOp);
}

void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                                    ArrayRef<const Expr *> Privates,
                                    ArrayRef<const Expr *> LHSExprs,
                                    ArrayRef<const Expr *> RHSExprs,
                                    ArrayRef<const Expr *> ReductionOps,
                                    ReductionOptionsTy Options) {
  if (!CGF.HaveInsertPoint())
    return;

  bool WithNowait = Options.WithNowait;
  bool SimpleReduction = Options.SimpleReduction;

  // Next code should be emitted for reduction:
  //
  // static kmp_critical_name lock = { 0 };
  //
  // void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  // *(Type0*)lhs[0] = ReductionOperation0(*(Type0*)lhs[0], *(Type0*)rhs[0]);
  // ...
  // *(Type<n>-1*)lhs[<n>-1] = ReductionOperation<n>-1(*(Type<n>-1*)lhs[<n>-1],
  // *(Type<n>-1*)rhs[<n>-1]);
  // }
  //
  // ...
  // void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  // switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>)) {
  // case 1:
  // ...
  // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  // ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  // case 2:
  // ...
  // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  // ...
  // [__kmpc_end_reduce(<loc>, <gtid>, &<lock>);]
  // break;
  // default:;
  // }
  //
  // if SimpleReduction is true, only the next code is generated:
  // ...
  // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  // ...

  auto &C = CGM.getContext();

  // Simple (e.g. serial) reductions: just run each combiner, no runtime calls.
  if (SimpleReduction) {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (auto *E : ReductionOps) {
      emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                  cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
    return;
  }

  // 1. Build a list of reduction variables.
  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
  auto Size = RHSExprs.size();
  for (auto *E : Privates) {
    if (E->getType()->isVariablyModifiedType())
      // Reserve place for array size.
      ++Size;
  }
  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
  QualType ReductionArrayTy =
      C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
                             /*IndexTypeQuals=*/0);
  Address ReductionList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
  auto IPriv = Privates.begin();
  unsigned Idx = 0;
  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
    Address Elem =
        CGF.Builder.CreateConstArrayGEP(ReductionList, Idx, CGF.getPointerSize());
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
        Elem);
    if ((*IPriv)->getType()->isVariablyModifiedType()) {
      // Store array size.
      ++Idx;
      Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
                                             CGF.getPointerSize());
      llvm::Value *Size = CGF.Builder.CreateIntCast(
          CGF.getVLASize(
              CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
              .first,
          CGF.SizeTy, /*isSigned=*/false);
      CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
                              Elem);
    }
  }

  // 2. Emit reduce_func().
  auto *ReductionFn = emitReductionFunction(
      CGM, Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(),
      Privates, LHSExprs, RHSExprs, ReductionOps);

  // 3. Create static kmp_critical_name lock = { 0 };
  auto *Lock = getCriticalRegionLock(".reduction");

  // 4. Build res = __kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  // RedList, reduce_func, &<lock>);
  auto *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
  auto *ThreadId = getThreadID(CGF, Loc);
  auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
  auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      ReductionList.getPointer(), CGF.VoidPtrTy);
  llvm::Value *Args[] = {
      IdentTLoc,                             // ident_t *<loc>
      ThreadId,                              // i32 <gtid>
      CGF.Builder.getInt32(RHSExprs.size()), // i32 <n>
      ReductionArrayTySize,                  // size_type sizeof(RedList)
      RL,                                    // void *RedList
      ReductionFn, // void (*) (void *, void *) <reduce_func>
      Lock         // kmp_critical_name *&<lock>
  };
  auto Res = CGF.EmitRuntimeCall(
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_reduce_nowait
                                       : OMPRTL__kmpc_reduce),
      Args);

  // 5. Build switch(res)
  auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
  auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/2);

  // 6. Build case 1:
  // ...
  // <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  // ...
  // __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  // break;
  auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
  CGF.EmitBlock(Case1BB);

  // Add emission of __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  llvm::Value *EndArgs[] = {
      IdentTLoc, // ident_t *<loc>
      ThreadId,  // i32 <gtid>
      Lock       // kmp_critical_name *&<lock>
  };
  auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps](
                       CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto &RT = CGF.CGM.getOpenMPRuntime();
    auto IPriv = Privates.begin();
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    for (auto *E : ReductionOps) {
      RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
                                     cast<DeclRefExpr>(*IRHS));
      ++IPriv;
      ++ILHS;
      ++IRHS;
    }
  };
  RegionCodeGenTy RCG(CodeGen);
  CommonActionTy Action(
      nullptr, llvm::None,
      createRuntimeFunction(WithNowait ? OMPRTL__kmpc_end_reduce_nowait
                                       : OMPRTL__kmpc_end_reduce),
      EndArgs);
  RCG.setAction(Action);
  RCG(CGF);

  CGF.EmitBranch(DefaultBB);

  // 7. Build case 2:
  // ...
  // Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  // ...
  // break;
  auto *Case2BB = CGF.createBasicBlock(".omp.reduction.case2");
  SwInst->addCase(CGF.Builder.getInt32(2), Case2BB);
  CGF.EmitBlock(Case2BB);

  auto &&AtomicCodeGen = [Loc, &Privates, &LHSExprs, &RHSExprs, &ReductionOps](
                             CodeGenFunction &CGF, PrePostActionTy &Action) {
    auto ILHS = LHSExprs.begin();
    auto IRHS = RHSExprs.begin();
    auto IPriv = Privates.begin();
    for (auto *E : ReductionOps) {
      const Expr *XExpr = nullptr;
      const Expr *EExpr = nullptr;
      const Expr *UpExpr = nullptr;
      BinaryOperatorKind BO = BO_Comma;
      // A combiner of the shape 'x = <update>' may be emitted as a simple
      // atomic update; otherwise fall back to a critical region below.
      if (auto *BO = dyn_cast<BinaryOperator>(E)) {
        if (BO->getOpcode() == BO_Assign) {
          XExpr = BO->getLHS();
          UpExpr = BO->getRHS();
        }
      }
      // Try to emit update expression as a simple atomic.
      auto *RHSExpr = UpExpr;
      if (RHSExpr) {
        // Analyze RHS part of the whole expression.
        if (auto *ACO = dyn_cast<AbstractConditionalOperator>(
                RHSExpr->IgnoreParenImpCasts())) {
          // If this is a conditional operator, analyze its condition for
          // min/max reduction operator.
          RHSExpr = ACO->getCond();
        }
        if (auto *BORHS =
                dyn_cast<BinaryOperator>(RHSExpr->IgnoreParenImpCasts())) {
          EExpr = BORHS->getRHS();
          BO = BORHS->getOpcode();
        }
      }
      if (XExpr) {
        auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
        auto &&AtomicRedGen = [BO, VD,
                               Loc](CodeGenFunction &CGF, const Expr *XExpr,
                                    const Expr *EExpr, const Expr *UpExpr) {
          LValue X = CGF.EmitLValue(XExpr);
          RValue E;
          if (EExpr)
            E = CGF.EmitAnyExpr(EExpr);
          CGF.EmitOMPAtomicSimpleUpdateExpr(
              X, E, BO, /*IsXLHSInRHSPart=*/true,
              llvm::AtomicOrdering::Monotonic, Loc,
              [&CGF, UpExpr, VD, Loc](RValue XRValue) {
                // Evaluate the update expression against a temporary that
                // holds the old value of X.
                CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
                PrivateScope.addPrivate(
                    VD, [&CGF, VD, XRValue, Loc]() -> Address {
                      Address LHSTemp = CGF.CreateMemTemp(VD->getType());
                      CGF.emitOMPSimpleStore(
                          CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
                          VD->getType().getNonReferenceType(), Loc);
                      return LHSTemp;
                    });
                (void)PrivateScope.Privatize();
                return CGF.EmitAnyExpr(UpExpr);
              });
        };
        if ((*IPriv)->getType()->isArrayType()) {
          // Emit atomic reduction for array section.
          auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), VD, RHSVar,
                                    AtomicRedGen, XExpr, EExpr, UpExpr);
        } else
          // Emit atomic reduction for array subscript or single variable.
          AtomicRedGen(CGF, XExpr, EExpr, UpExpr);
      } else {
        // Emit as a critical region.
        auto &&CritRedGen = [E, Loc](CodeGenFunction &CGF, const Expr *,
                                     const Expr *, const Expr *) {
          auto &RT = CGF.CGM.getOpenMPRuntime();
          RT.emitCriticalRegion(
              CGF, ".atomic_reduction",
              [=](CodeGenFunction &CGF, PrePostActionTy &Action) {
                Action.Enter(CGF);
                emitReductionCombiner(CGF, E);
              },
              Loc);
        };
        if ((*IPriv)->getType()->isArrayType()) {
          auto *LHSVar = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
          auto *RHSVar = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
          EmitOMPAggregateReduction(CGF, (*IPriv)->getType(), LHSVar, RHSVar,
                                    CritRedGen);
        } else
          CritRedGen(CGF, nullptr, nullptr, nullptr);
      }
      ++ILHS;
      ++IRHS;
      ++IPriv;
    }
  };
  RegionCodeGenTy AtomicRCG(AtomicCodeGen);
  if (!WithNowait) {
    // Add emission of __kmpc_end_reduce(<loc>, <gtid>, &<lock>);
    llvm::Value *EndArgs[] = {
        IdentTLoc, // ident_t *<loc>
        ThreadId,  // i32 <gtid>
        Lock       // kmp_critical_name *&<lock>
    };
    CommonActionTy Action(nullptr, llvm::None,
                          createRuntimeFunction(OMPRTL__kmpc_end_reduce),
                          EndArgs);
    AtomicRCG.setAction(Action);
    AtomicRCG(CGF);
  } else
    AtomicRCG(CGF);

  CGF.EmitBranch(DefaultBB);
  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
}

/// Generates unique name for artificial threadprivate variables.
/// Format is: <Prefix> "." <Loc_raw_encoding> "_" <N>
static std::string generateUniqueName(StringRef Prefix, SourceLocation Loc,
                                      unsigned N) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << Prefix << "."
      << Loc.getRawEncoding() << "_" << N;
  return Out.str();
}

/// Emits reduction initializer function:
/// \code
/// void @.red_init(void* %arg) {
/// %0 = bitcast void* %arg to <type>*
/// store <type> <init>, <type>* %0
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  auto &C = CGM.getContext();
  // void .red_init.(void *arg) — internal-linkage helper registered with the
  // runtime for task reduction item N.
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    ".red_init.", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName("reduction_size", Loc, N));
    Size =
        CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                             CGM.getContext().getSizeType(), SourceLocation());
  }
  RCG.emitAggregateType(CGF, N, Size);
  LValue SharedLVal;
  // If initializer uses initializer from declare reduction construct, emit a
  // pointer to the address of the original reduction item (required by
  // reduction initializer)
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr =
        CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
            CGF, CGM.getContext().VoidPtrTy,
            generateUniqueName("reduction", Loc, N));
    SharedLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
  } else {
    // No original-item pointer needed; pass a null lvalue.
    SharedLVal = CGF.MakeNaturalAlignAddrLValue(
        llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
        CGM.getContext().VoidPtrTy);
  }
  // Emit the initializer:
  // %0 = bitcast void* %arg to <type>*
  // store <type> <init>, <type>* %0
  RCG.emitInitialization(CGF, N, PrivateAddr, SharedLVal,
                         [](CodeGenFunction &) { return false; });
  CGF.FinishFunction();
  return Fn;
}

/// Emits reduction combiner function:
/// \code
/// void @.red_comb(void* %arg0, void* %arg1) {
/// %lhs = bitcast void* %arg0 to <type>*
/// %rhs = bitcast void* %arg1 to <type>*
/// %2 = <ReductionOp>(<type>* %lhs, <type>* %rhs)
/// store <type> %2, <type>* %lhs
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N,
                                           const Expr *ReductionOp,
                                           const Expr *LHS, const Expr *RHS,
                                           const Expr *PrivateRef) {
  auto &C = CGM.getContext();
  auto *LHSVD =
      cast<VarDecl>(cast<DeclRefExpr>(LHS)->getDecl());
  auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
  // void .red_comb.(void *arg0 /*in-out*/, void *arg1 /*in*/);
  FunctionArgList Args;
  ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
                               C.VoidPtrTy, ImplicitParamDecl::Other);
  ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                            ImplicitParamDecl::Other);
  Args.emplace_back(&ParamInOut);
  Args.emplace_back(&ParamIn);
  auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    ".red_comb.", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName("reduction_size", Loc, N));
    Size =
        CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                             CGM.getContext().getSizeType(), SourceLocation());
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Remap lhs and rhs variables to the addresses of the function arguments.
  // %lhs = bitcast void* %arg0 to <type>*
  // %rhs = bitcast void* %arg1 to <type>*
  CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
  PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() -> Address {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamInOut),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
  });
  PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() -> Address {
    // Pull out the pointer to the variable.
    Address PtrAddr = CGF.EmitLoadOfPointer(
        CGF.GetAddrOfLocalVar(&ParamIn),
        C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
    return CGF.Builder.CreateElementBitCast(
        PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
  });
  PrivateScope.Privatize();
  // Emit the combiner body:
  // %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
  // store <type> %2, <type>* %lhs
  CGM.getOpenMPRuntime().emitSingleReductionCombiner(
      CGF, ReductionOp, PrivateRef, cast<DeclRefExpr>(LHS),
      cast<DeclRefExpr>(RHS));
  CGF.FinishFunction();
  return Fn;
}

/// Emits reduction finalizer function:
/// \code
/// void @.red_fini(void* %arg) {
/// %0 = bitcast void* %arg to <type>*
/// <destroy>(<type>* %0)
/// ret void
/// }
/// \endcode
static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
                                           SourceLocation Loc,
                                           ReductionCodeGen &RCG, unsigned N) {
  // No finalizer is needed (and nullptr is registered) when the reduction item
  // requires no cleanups.
  if (!RCG.needCleanups(N))
    return nullptr;
  auto &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
                          ImplicitParamDecl::Other);
  Args.emplace_back(&Param);
  auto &FnInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
  auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
                                    ".red_fini.", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
  CodeGenFunction CGF(CGM);
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
  Address PrivateAddr = CGF.EmitLoadOfPointer(
      CGF.GetAddrOfLocalVar(&Param),
      C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
  llvm::Value *Size = nullptr;
  // If the size of the reduction item is non-constant, load it from global
  // threadprivate variable.
  if (RCG.getSizes(N).second) {
    Address SizeAddr = CGM.getOpenMPRuntime().getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName("reduction_size", Loc, N));
    Size =
        CGF.EmitLoadOfScalar(SizeAddr, /*Volatile=*/false,
                             CGM.getContext().getSizeType(), SourceLocation());
  }
  RCG.emitAggregateType(CGF, N, Size);
  // Emit the finalizer body:
  // <destroy>(<type>* %0)
  RCG.emitCleanups(CGF, N, PrivateAddr);
  CGF.FinishFunction();
  return Fn;
}

/// Builds the kmp_task_red_input_t descriptor array for the task reduction
/// items in \p Data and registers the per-item init/fini/comb helpers.
llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  if (!CGF.HaveInsertPoint() || Data.ReductionVars.empty())
    return nullptr;

  // Build typedef struct:
  // kmp_task_red_input {
  // void *reduce_shar; // shared reduction item
  // size_t reduce_size; // size of data item
  // void *reduce_init; // data initialization routine
  // void *reduce_fini; // data finalization routine
  // void *reduce_comb; // data combiner routine
  // kmp_task_red_flags_t flags; // flags for additional info from compiler
  // } kmp_task_red_input_t;
  ASTContext &C = CGM.getContext();
  auto *RD = C.buildImplicitRecord("kmp_task_red_input_t");
  RD->startDefinition();
  const FieldDecl *SharedFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *SizeFD = addFieldToRecordDecl(C, RD, C.getSizeType());
  const FieldDecl *InitFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FiniFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *CombFD = addFieldToRecordDecl(C, RD, C.VoidPtrTy);
  const FieldDecl *FlagsFD = addFieldToRecordDecl(
      C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
  RD->completeDefinition();
  QualType RDType = C.getRecordType(RD);
  unsigned Size = Data.ReductionVars.size();
  llvm::APInt ArraySize(/*numBits=*/64, Size);
  QualType ArrayRDType = C.getConstantArrayType(
      RDType, ArraySize, ArrayType::Normal, /*IndexTypeQuals=*/0);
  // kmp_task_red_input_t .rd_input.[Size];
  Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
  ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionCopies,
                       Data.ReductionOps);
  // Fill one descriptor per reduction item.
  for (unsigned Cnt = 0; Cnt < Size; ++Cnt) {
    // kmp_task_red_input_t &ElemLVal = .rd_input.[Cnt];
    llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
                           llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
    llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
        TaskRedInput.getPointer(), Idxs,
        /*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
        ".rd_input.gep.");
    LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
    // ElemLVal.reduce_shar = &Shareds[Cnt];
    LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
    RCG.emitSharedLValue(CGF, Cnt);
    llvm::Value *CastedShared =
        CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer());
    CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
    RCG.emitAggregateType(CGF, Cnt);
    llvm::Value *SizeValInChars;
    llvm::Value *SizeVal;
    std::tie(SizeValInChars, SizeVal) = RCG.getSizes(Cnt);
    // We use delayed creation/initialization for VLAs, array sections and
    // custom reduction initializations. It is required because runtime does not
    // provide the way to pass the sizes of VLAs/array sections to
    // initializer/combiner/finalizer functions and does not pass the pointer to
    // original reduction item to the initializer. Instead threadprivate global
    // variables are used to store these values and use them in the functions.
    bool DelayedCreation = !!SizeVal;
    SizeValInChars = CGF.Builder.CreateIntCast(SizeValInChars, CGM.SizeTy,
                                               /*isSigned=*/false);
    LValue SizeLVal = CGF.EmitLValueForField(ElemLVal, SizeFD);
    CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
    // ElemLVal.reduce_init = init;
    LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
    llvm::Value *InitAddr =
        CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
    CGF.EmitStoreOfScalar(InitAddr, InitLVal);
    DelayedCreation = DelayedCreation || RCG.usesReductionInitializer(Cnt);
    // ElemLVal.reduce_fini = fini;
    LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
    // emitReduceFiniFunction returns nullptr when no cleanups are needed; the
    // runtime expects a null function pointer in that case.
    llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
    llvm::Value *FiniAddr = Fini
                                ? CGF.EmitCastToVoidPtr(Fini)
                                : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
    CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
    // ElemLVal.reduce_comb = comb;
    LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
    llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
        CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
        RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
    CGF.EmitStoreOfScalar(CombAddr, CombLVal);
    // ElemLVal.flags = 0 (or 1 to request delayed (lazy) creation by the
    // runtime, see the comment on DelayedCreation above).
    LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
    if (DelayedCreation) {
      CGF.EmitStoreOfScalar(
          llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*IsSigned=*/true),
          FlagsLVal);
    } else
      CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
  }
  // Build call void *__kmpc_task_reduction_init(int gtid, int num_data, void
  // *data);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      llvm::ConstantInt::get(CGM.IntTy, Size, /*isSigned=*/true),
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TaskRedInput.getPointer(),
                                                      CGM.VoidPtrTy)};
  return CGF.EmitRuntimeCall(
      createRuntimeFunction(OMPRTL__kmpc_task_reduction_init), Args);
}

/// Store into threadprivate globals the values the reduction helper
/// functions cannot receive as arguments: the runtime size of a
/// VLA/array-section item and, when a custom initializer is used, the address
/// of the original reduction item.
void CGOpenMPRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              ReductionCodeGen &RCG,
                                              unsigned N) {
  auto Sizes = RCG.getSizes(N);
  // Emit threadprivate global variable if the type size is non-constant,
  // i.e. Sizes.second (the runtime size value) is non-null.
  if (Sizes.second) {
    llvm::Value *SizeVal = CGF.Builder.CreateIntCast(Sizes.second, CGM.SizeTy,
                                                     /*isSigned=*/false);
    Address SizeAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().getSizeType(),
        generateUniqueName("reduction_size", Loc, N));
    CGF.Builder.CreateStore(SizeVal, SizeAddr, /*IsVolatile=*/false);
  }
  // Store address of the original reduction item if custom initializer is used.
  if (RCG.usesReductionInitializer(N)) {
    Address SharedAddr = getAddrOfArtificialThreadPrivate(
        CGF, CGM.getContext().VoidPtrTy,
        generateUniqueName("reduction", Loc, N));
    CGF.Builder.CreateStore(
        CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            RCG.getSharedLValue(N).getPointer(), CGM.VoidPtrTy),
        SharedAddr, /*IsVolatile=*/false);
  }
}

/// Get the address of the task-local copy of a reduction item from the
/// runtime by calling __kmpc_task_reduction_get_th_data. The returned address
/// uses the alignment of the shared item's lvalue.
Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              llvm::Value *ReductionsPtr,
                                              LValue SharedLVal) {
  // Build call void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void
  // *d);
  llvm::Value *Args[] = {
      CGF.Builder.CreateIntCast(getThreadID(CGF, Loc), CGM.IntTy,
                                /*isSigned=*/true),
      ReductionsPtr,
      CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(SharedLVal.getPointer(),
                                                      CGM.VoidPtrTy)};
  return Address(
      CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_task_reduction_get_th_data), Args),
      SharedLVal.getAlignment());
}

/// Emit the runtime call implementing '#pragma omp taskwait', followed by the
/// untied-task switch for the enclosing OpenMP region, if any.
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                       SourceLocation Loc) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
  // global_tid);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
  // Ignore return result until untied tasks are supported.
  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_omp_taskwait), Args);
  if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
    Region->emitUntiedSwitch(CGF);
}

/// Emit the body of an OpenMP directive that needs no outlined function
/// (e.g. 'for', 'sections'), inside an inlined-region RAII scope.
void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
                                           OpenMPDirectiveKind InnerKind,
                                           const RegionCodeGenTy &CodeGen,
                                           bool HasCancel) {
  if (!CGF.HaveInsertPoint())
    return;
  InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
  CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}

namespace {
/// Cancellation kind constants understood by the runtime's cancel entry
/// points (the 'cncl_kind' argument of __kmpc_cancel and
/// __kmpc_cancellationpoint).
enum RTCancelKind {
  CancelNoreq = 0,
  CancelParallel = 1,
  CancelLoop = 2,
  CancelSections = 3,
  CancelTaskgroup = 4
};
} // anonymous namespace

/// Map an OpenMP directive kind naming a cancellation region to the runtime's
/// RTCancelKind constant. Asserts on any region other than parallel/for/
/// sections/taskgroup.
static RTCancelKind getCancellationKind(OpenMPDirectiveKind CancelRegion) {
  RTCancelKind CancelKind = CancelNoreq;
  if (CancelRegion == OMPD_parallel)
    CancelKind = CancelParallel;
  else if (CancelRegion == OMPD_for)
    CancelKind = CancelLoop;
  else if (CancelRegion == OMPD_sections)
    CancelKind = CancelSections;
  else {
    assert(CancelRegion == OMPD_taskgroup);
    CancelKind = CancelTaskgroup;
  }
  return CancelKind;
}

void CGOpenMPRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
  // global_tid, kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    // For 'cancellation point taskgroup', the task region info may not have a
    // cancel. This may instead happen in another adjacent task.
    if (CancelRegion == OMPD_taskgroup || OMPRegionInfo->hasCancel()) {
      llvm::Value *Args[] = {
          emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      auto *Result = CGF.EmitRuntimeCall(
          createRuntimeFunction(OMPRTL__kmpc_cancellationpoint), Args);
      // if (__kmpc_cancellationpoint()) {
      //   exit from construct;
      // }
      auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
      auto *ContBB = CGF.createBasicBlock(".cancel.continue");
      auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      auto CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    }
  }
}

/// Emit '#pragma omp cancel': call kmp_int32 __kmpc_cancel(...) and, when it
/// returns non-zero, branch out of the enclosing construct through its
/// cleanups. When \p IfCond is given, the call is guarded by the 'if' clause
/// condition.
void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                                     const Expr *IfCond,
                                     OpenMPDirectiveKind CancelRegion) {
  if (!CGF.HaveInsertPoint())
    return;
  // Build call kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
  // kmp_int32 cncl_kind);
  if (auto *OMPRegionInfo =
          dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
    auto &&ThenGen = [Loc, CancelRegion, OMPRegionInfo](CodeGenFunction &CGF,
                                                        PrePostActionTy &) {
      auto &RT = CGF.CGM.getOpenMPRuntime();
      llvm::Value *Args[] = {
          RT.emitUpdateLocation(CGF, Loc), RT.getThreadID(CGF, Loc),
          CGF.Builder.getInt32(getCancellationKind(CancelRegion))};
      // Ignore return result until untied tasks are supported.
      auto *Result = CGF.EmitRuntimeCall(
          RT.createRuntimeFunction(OMPRTL__kmpc_cancel), Args);
      // if (__kmpc_cancel()) {
      //   exit from construct;
      // }
      auto *ExitBB = CGF.createBasicBlock(".cancel.exit");
      auto *ContBB = CGF.createBasicBlock(".cancel.continue");
      auto *Cmp = CGF.Builder.CreateIsNotNull(Result);
      CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
      CGF.EmitBlock(ExitBB);
      // exit from construct;
      auto CancelDest =
          CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
      CGF.EmitBranchThroughCleanup(CancelDest);
      CGF.EmitBlock(ContBB, /*IsFinished=*/true);
    };
    if (IfCond)
      emitOMPIfClause(CGF, IfCond, ThenGen,
                      [](CodeGenFunction &, PrePostActionTy &) {});
    else {
      RegionCodeGenTy ThenRCG(ThenGen);
      ThenRCG(CGF);
    }
  }
}

/// \brief Obtain information that uniquely identifies a target entry. This
/// consists of the file and device IDs as well as line number associated with
/// the relevant entry source location.
static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
                                     unsigned &DeviceID, unsigned &FileID,
                                     unsigned &LineNum) {

  auto &SM = C.getSourceManager();

  // The loc should be always valid and have a file ID (the user cannot use
  // #pragma directives in macros)

  assert(Loc.isValid() && "Source location is expected to be always valid.");
  assert(Loc.isFileID() && "Source location is expected to refer to a file.");

  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  assert(PLoc.isValid() && "Source location is expected to be always valid.");

  llvm::sys::fs::UniqueID ID;
  if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
    llvm_unreachable("Source file with target region no longer exists!");

  DeviceID = ID.getDevice();
  FileID = ID.getFile();
  LineNum = PLoc.getLine();
}

/// Emit the outlined function for a '#pragma omp target' region. Thin wrapper
/// around emitTargetOutlinedFunctionHelper; \p ParentName must be non-empty.
void CGOpenMPRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  assert(!ParentName.empty() && "Invalid target region parent name!");

  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}

void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  // Create a unique name for the entry function using the source location
  // information of the current target region. The name will be something like:
  //
  // __omp_offloading_DD_FFFF_PP_lBB
  //
  // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  // mangled name of the function that encloses the target region and BB is the
  // line number of the target region.

  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
                           Line);
  SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
       << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
  }

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  CodeGenFunction CGF(CGM, true);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);

  OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);

  // If this target outline function is not an offload entry, we don't need to
  // register it.
  if (!IsOffloadEntry)
    return;

  // The target region ID is used by the runtime library to identify the current
  // target region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that implements
  // the target region, but we aren't using that so that the compiler doesn't
  // need to keep that, and could therefore inline the host function if proven
  // worthwhile during optimization. On the other hand, if emitting code for the
  // device, the ID has to be the function address so that it can be retrieved
  // from the offloading entry and launched by the runtime library. We also mark
  // the outlined function to have external linkage in case we are emitting code
  // for the device, because these functions will be entry points to the device.

  if (CGM.getLangOpts().OpenMPIsDevice) {
    OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
    OutlinedFn->setLinkage(llvm::GlobalValue::ExternalLinkage);
    OutlinedFn->setDSOLocal(false);
  } else
    OutlinedFnID = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), ".omp_offload.region_id");

  // Register the information for the entry associated with this target region.
  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
      DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
      /*Flags=*/0);
}

/// discard all CompoundStmts intervening between two constructs
static const Stmt *ignoreCompoundStmts(const Stmt *Body) {
  while (auto *CS = dyn_cast_or_null<CompoundStmt>(Body))
    Body = CS->body_front();

  return Body;
}

/// Emit the number of teams for a target directive. Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumTeamsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                               CodeGenFunction &CGF,
                               const OMPExecutableDirective &D) {

  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");

  auto &Bld = CGF.Builder;

  // If the target directive is combined with a teams directive:
  //   Return the value in the num_teams clause, if any.
  //   Otherwise, return 0 to denote the runtime default.
  if (isOpenMPTeamsDirective(D.getDirectiveKind())) {
    if (const auto *NumTeamsClause = D.getSingleClause<OMPNumTeamsClause>()) {
      CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
      auto NumTeams = CGF.EmitScalarExpr(NumTeamsClause->getNumTeams(),
                                         /*IgnoreResultAssign*/ true);
      return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                               /*IsSigned=*/true);
    }

    // The default value is 0.
    return Bld.getInt32(0);
  }

  // If the target directive is combined with a parallel directive but not a
  // teams directive, start one team.
  if (isOpenMPParallelDirective(D.getDirectiveKind()))
    return Bld.getInt32(1);

  // If the current target region has a teams region enclosed, we need to get
  // the number of teams to pass to the runtime function call. This is done
  // by generating the expression in an inlined region. This is required because
  // the expression is captured in the enclosing target environment when the
  // teams directive is not combined with target.

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  if (auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (auto *NTE = TeamsDir->getSingleClause<OMPNumTeamsClause>()) {
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *NumTeams = CGF.EmitScalarExpr(NTE->getNumTeams());
        return Bld.CreateIntCast(NumTeams, CGF.Int32Ty,
                                 /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no num_teams clause we use
      // the default value 0.
      return Bld.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}

/// Emit the number of threads for a target directive. Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
static llvm::Value *
emitNumThreadsForTargetDirective(CGOpenMPRuntime &OMPRuntime,
                                 CodeGenFunction &CGF,
                                 const OMPExecutableDirective &D) {

  assert(!CGF.getLangOpts().OpenMPIsDevice && "Clauses associated with the "
                                              "teams directive expected to be "
                                              "emitted only for the host!");

  auto &Bld = CGF.Builder;

  //
  // If the target directive is combined with a teams directive:
  //   Return the value in the thread_limit clause, if any.
  //
  // If the target directive is combined with a parallel directive:
  //   Return the value in the num_threads clause, if any.
  //
  // If both clauses are set, select the minimum of the two.
  //
  // If neither teams nor parallel combined directives set the number of threads
  // in a team, return 0 to denote the runtime default.
  //
  // If this is not a teams directive return nullptr.

  if (isOpenMPTeamsDirective(D.getDirectiveKind()) ||
      isOpenMPParallelDirective(D.getDirectiveKind())) {
    llvm::Value *DefaultThreadLimitVal = Bld.getInt32(0);
    llvm::Value *NumThreadsVal = nullptr;
    llvm::Value *ThreadLimitVal = nullptr;

    if (const auto *ThreadLimitClause =
            D.getSingleClause<OMPThreadLimitClause>()) {
      CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
      auto ThreadLimit = CGF.EmitScalarExpr(ThreadLimitClause->getThreadLimit(),
                                            /*IgnoreResultAssign*/ true);
      ThreadLimitVal = Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
    }

    if (const auto *NumThreadsClause =
            D.getSingleClause<OMPNumThreadsClause>()) {
      CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
      llvm::Value *NumThreads =
          CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
                             /*IgnoreResultAssign*/ true);
      NumThreadsVal =
          Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*IsSigned=*/true);
    }

    // Select the lesser of thread_limit and num_threads.
    if (NumThreadsVal)
      ThreadLimitVal = ThreadLimitVal
                           ? Bld.CreateSelect(Bld.CreateICmpSLT(NumThreadsVal,
                                                                ThreadLimitVal),
                                              NumThreadsVal, ThreadLimitVal)
                           : NumThreadsVal;

    // Set default value passed to the runtime if either teams or a target
    // parallel type directive is found but no clause is specified.
    if (!ThreadLimitVal)
      ThreadLimitVal = DefaultThreadLimitVal;

    return ThreadLimitVal;
  }

  // If the current target region has a teams region enclosed, we need to get
  // the thread limit to pass to the runtime function call. This is done
  // by generating the expression in an inlined region. This is required because
  // the expression is captured in the enclosing target environment when the
  // teams directive is not combined with target.

  const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);

  if (auto *TeamsDir = dyn_cast_or_null<OMPExecutableDirective>(
          ignoreCompoundStmts(CS.getCapturedStmt()))) {
    if (isOpenMPTeamsDirective(TeamsDir->getDirectiveKind())) {
      if (auto *TLE = TeamsDir->getSingleClause<OMPThreadLimitClause>()) {
        CGOpenMPInnerExprInfo CGInfo(CGF, CS);
        CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
        llvm::Value *ThreadLimit = CGF.EmitScalarExpr(TLE->getThreadLimit());
        return CGF.Builder.CreateIntCast(ThreadLimit, CGF.Int32Ty,
                                         /*IsSigned=*/true);
      }

      // If we have an enclosed teams directive but no thread_limit clause we
      // use the default value 0.
      return CGF.Builder.getInt32(0);
    }
  }

  // No teams associated with the directive.
  return nullptr;
}

namespace {
// \brief Utility to handle information from clauses associated with a given
// construct that use mappable expressions (e.g. 'map' clause, 'to' clause).
// It provides a convenient interface to obtain the information and generate
// code for that information.
class MappableExprsHandler {
public:
  /// \brief Values for bit flags used to specify the mapping type for
  /// offloading.
  enum OpenMPOffloadMappingFlags {
    /// \brief Allocate memory on the device and move data from host to device.
    OMP_MAP_TO = 0x01,
    /// \brief Allocate memory on the device and move data from device to host.
    OMP_MAP_FROM = 0x02,
    /// \brief Always perform the requested mapping action on the element, even
    /// if it was already mapped before.
    OMP_MAP_ALWAYS = 0x04,
    /// \brief Delete the element from the device environment, ignoring the
    /// current reference count associated with the element.
    OMP_MAP_DELETE = 0x08,
    /// \brief The element being mapped is a pointer-pointee pair; both the
    /// pointer and the pointee should be mapped.
    OMP_MAP_PTR_AND_OBJ = 0x10,
    /// \brief This flag signals that the base address of an entry should be
    /// passed to the target kernel as an argument.
    OMP_MAP_TARGET_PARAM = 0x20,
    /// \brief Signal that the runtime library has to return the device pointer
    /// in the current position for the data being mapped. Used when we have the
    /// use_device_ptr clause.
    OMP_MAP_RETURN_PARAM = 0x40,
    /// \brief This flag signals that the reference being passed is a pointer to
    /// private data.
    OMP_MAP_PRIVATE = 0x80,
    /// \brief Pass the element to the device by value.
    OMP_MAP_LITERAL = 0x100,
    /// Implicit map
    OMP_MAP_IMPLICIT = 0x200,
  };

  /// Class that associates information with a base pointer to be passed to the
  /// runtime library.
  class BasePointerInfo {
    /// The base pointer.
    llvm::Value *Ptr = nullptr;
    /// The base declaration that refers to this device pointer, or null if
    /// there is none.
    const ValueDecl *DevPtrDecl = nullptr;

  public:
    BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
        : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
    llvm::Value *operator*() const { return Ptr; }
    const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
    void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
  };

  typedef SmallVector<BasePointerInfo, 16> MapBaseValuesArrayTy;
  typedef SmallVector<llvm::Value *, 16> MapValuesArrayTy;
  typedef SmallVector<uint64_t, 16> MapFlagsArrayTy;

private:
  /// \brief Directive from where the map clauses were extracted.
  const OMPExecutableDirective &CurDir;

  /// \brief Function the directive is being generated for.
  CodeGenFunction &CGF;

  /// \brief Set of all first private variables in the current directive.
  llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
  /// Set of all reduction variables in the current directive.
  llvm::SmallPtrSet<const VarDecl *, 8> ReductionDecls;

  /// Map between device pointer declarations and their expression components.
  /// The key value for declarations in 'this' is null.
  llvm::DenseMap<
      const ValueDecl *,
      SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
      DevPointersMap;

  /// Compute the size in bytes of the object denoted by \p E, taking the
  /// length of an array section into account when \p E is one.
  llvm::Value *getExprTypeSize(const Expr *E) const {
    auto ExprTy = E->getType().getCanonicalType();

    // Reference types are ignored for mapping purposes.
    if (auto *RefTy = ExprTy->getAs<ReferenceType>())
      ExprTy = RefTy->getPointeeType().getCanonicalType();

    // Given that an array section is considered a built-in type, we need to
    // do the calculation based on the length of the section instead of relying
    // on CGF.getTypeSize(E->getType()).
    if (const auto *OAE = dyn_cast<OMPArraySectionExpr>(E)) {
      QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(
                            OAE->getBase()->IgnoreParenImpCasts())
                            .getCanonicalType();

      // If there is no length associated with the expression, that means we
      // are using the whole length of the base.
      if (!OAE->getLength() && OAE->getColonLoc().isValid())
        return CGF.getTypeSize(BaseTy);

      llvm::Value *ElemSize;
      if (auto *PTy = BaseTy->getAs<PointerType>())
        ElemSize = CGF.getTypeSize(PTy->getPointeeType().getCanonicalType());
      else {
        auto *ATy = cast<ArrayType>(BaseTy.getTypePtr());
        assert(ATy && "Expecting array type if not a pointer type.");
        ElemSize = CGF.getTypeSize(ATy->getElementType().getCanonicalType());
      }

      // If we don't have a length at this point, that is because we have an
      // array section with a single element.
      if (!OAE->getLength())
        return ElemSize;

      auto *LengthVal = CGF.EmitScalarExpr(OAE->getLength());
      LengthVal =
          CGF.Builder.CreateIntCast(LengthVal, CGF.SizeTy, /*isSigned=*/false);
      return CGF.Builder.CreateNUWMul(LengthVal, ElemSize);
    }
    return CGF.getTypeSize(ExprTy);
  }

  /// \brief Return the corresponding bits for a given map clause modifier. Add
  /// a flag marking the map as a pointer if requested. Add a flag marking the
  /// map as the first one of a series of maps that relate to the same map
  /// expression.
  uint64_t getMapTypeBits(OpenMPMapClauseKind MapType,
                          OpenMPMapClauseKind MapTypeModifier, bool AddPtrFlag,
                          bool AddIsTargetParamFlag) const {
    uint64_t Bits = 0u;
    switch (MapType) {
    case OMPC_MAP_alloc:
    case OMPC_MAP_release:
      // alloc and release is the default behavior in the runtime library, i.e.
      // if we don't pass any bits alloc/release that is what the runtime is
      // going to do. Therefore, we don't need to signal anything for these two
      // type modifiers.
      break;
    case OMPC_MAP_to:
      Bits = OMP_MAP_TO;
      break;
    case OMPC_MAP_from:
      Bits = OMP_MAP_FROM;
      break;
    case OMPC_MAP_tofrom:
      Bits = OMP_MAP_TO | OMP_MAP_FROM;
      break;
    case OMPC_MAP_delete:
      Bits = OMP_MAP_DELETE;
      break;
    default:
      llvm_unreachable("Unexpected map type!");
      break;
    }
    if (AddPtrFlag)
      Bits |= OMP_MAP_PTR_AND_OBJ;
    if (AddIsTargetParamFlag)
      Bits |= OMP_MAP_TARGET_PARAM;
    if (MapTypeModifier == OMPC_MAP_always)
      Bits |= OMP_MAP_ALWAYS;
    return Bits;
  }

  /// \brief Return true if the provided expression is a final array section. A
  /// final array section, is one whose length can't be proved to be one.
  bool isFinalArraySectionExpression(const Expr *E) const {
    auto *OASE = dyn_cast<OMPArraySectionExpr>(E);

    // It is not an array section and therefore not a unity-size one.
    if (!OASE)
      return false;

    // An array section with no colon always refers to a single element.
    if (OASE->getColonLoc().isInvalid())
      return false;

    auto *Length = OASE->getLength();

    // If we don't have a length we have to check if the array has size 1
    // for this dimension. Also, we should always expect a length if the
    // base type is pointer.
    if (!Length) {
      auto BaseQTy = OMPArraySectionExpr::getBaseOriginalType(
                         OASE->getBase()->IgnoreParenImpCasts())
                         .getCanonicalType();
      if (auto *ATy = dyn_cast<ConstantArrayType>(BaseQTy.getTypePtr()))
        return ATy->getSize().getSExtValue() != 1;
      // If we don't have a constant dimension length, we have to consider
      // the current section as having any size, so it is not necessarily
      // unitary. If it happens to be unity size, that's user fault.
      return true;
    }

    // Check if the length evaluates to 1.
    llvm::APSInt ConstLength;
    if (!Length->EvaluateAsInt(ConstLength, CGF.getContext()))
      return true; // Can have more than size 1.

    return ConstLength.getSExtValue() != 1;
  }

  /// \brief Generate the base pointers, section pointers, sizes and map type
  /// bits for the provided map type, map modifier, and expression components.
  /// \a IsFirstComponent should be set to true if the provided set of
  /// components is the first associated with a capture.
  void generateInfoForComponentList(
      OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
      MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
      MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
      bool IsFirstComponentList, bool IsImplicit) const {

    // The following summarizes what has to be generated for each map and the
    // types below. The generated information is expressed in this order:
    // base pointer, section pointer, size, flags
    // (to add to the ones that come from the map type and modifier).
6329 // 6330 // double d; 6331 // int i[100]; 6332 // float *p; 6333 // 6334 // struct S1 { 6335 // int i; 6336 // float f[50]; 6337 // } 6338 // struct S2 { 6339 // int i; 6340 // float f[50]; 6341 // S1 s; 6342 // double *p; 6343 // struct S2 *ps; 6344 // } 6345 // S2 s; 6346 // S2 *ps; 6347 // 6348 // map(d) 6349 // &d, &d, sizeof(double), noflags 6350 // 6351 // map(i) 6352 // &i, &i, 100*sizeof(int), noflags 6353 // 6354 // map(i[1:23]) 6355 // &i(=&i[0]), &i[1], 23*sizeof(int), noflags 6356 // 6357 // map(p) 6358 // &p, &p, sizeof(float*), noflags 6359 // 6360 // map(p[1:24]) 6361 // p, &p[1], 24*sizeof(float), noflags 6362 // 6363 // map(s) 6364 // &s, &s, sizeof(S2), noflags 6365 // 6366 // map(s.i) 6367 // &s, &(s.i), sizeof(int), noflags 6368 // 6369 // map(s.s.f) 6370 // &s, &(s.i.f), 50*sizeof(int), noflags 6371 // 6372 // map(s.p) 6373 // &s, &(s.p), sizeof(double*), noflags 6374 // 6375 // map(s.p[:22], s.a s.b) 6376 // &s, &(s.p), sizeof(double*), noflags 6377 // &(s.p), &(s.p[0]), 22*sizeof(double), ptr_flag 6378 // 6379 // map(s.ps) 6380 // &s, &(s.ps), sizeof(S2*), noflags 6381 // 6382 // map(s.ps->s.i) 6383 // &s, &(s.ps), sizeof(S2*), noflags 6384 // &(s.ps), &(s.ps->s.i), sizeof(int), ptr_flag 6385 // 6386 // map(s.ps->ps) 6387 // &s, &(s.ps), sizeof(S2*), noflags 6388 // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag 6389 // 6390 // map(s.ps->ps->ps) 6391 // &s, &(s.ps), sizeof(S2*), noflags 6392 // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag 6393 // &(s.ps->ps), &(s.ps->ps->ps), sizeof(S2*), ptr_flag 6394 // 6395 // map(s.ps->ps->s.f[:22]) 6396 // &s, &(s.ps), sizeof(S2*), noflags 6397 // &(s.ps), &(s.ps->ps), sizeof(S2*), ptr_flag 6398 // &(s.ps->ps), &(s.ps->ps->s.f[0]), 22*sizeof(float), ptr_flag 6399 // 6400 // map(ps) 6401 // &ps, &ps, sizeof(S2*), noflags 6402 // 6403 // map(ps->i) 6404 // ps, &(ps->i), sizeof(int), noflags 6405 // 6406 // map(ps->s.f) 6407 // ps, &(ps->s.f[0]), 50*sizeof(float), noflags 6408 // 6409 // map(ps->p) 6410 // 
ps, &(ps->p), sizeof(double*), noflags 6411 // 6412 // map(ps->p[:22]) 6413 // ps, &(ps->p), sizeof(double*), noflags 6414 // &(ps->p), &(ps->p[0]), 22*sizeof(double), ptr_flag 6415 // 6416 // map(ps->ps) 6417 // ps, &(ps->ps), sizeof(S2*), noflags 6418 // 6419 // map(ps->ps->s.i) 6420 // ps, &(ps->ps), sizeof(S2*), noflags 6421 // &(ps->ps), &(ps->ps->s.i), sizeof(int), ptr_flag 6422 // 6423 // map(ps->ps->ps) 6424 // ps, &(ps->ps), sizeof(S2*), noflags 6425 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag 6426 // 6427 // map(ps->ps->ps->ps) 6428 // ps, &(ps->ps), sizeof(S2*), noflags 6429 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag 6430 // &(ps->ps->ps), &(ps->ps->ps->ps), sizeof(S2*), ptr_flag 6431 // 6432 // map(ps->ps->ps->s.f[:22]) 6433 // ps, &(ps->ps), sizeof(S2*), noflags 6434 // &(ps->ps), &(ps->ps->ps), sizeof(S2*), ptr_flag 6435 // &(ps->ps->ps), &(ps->ps->ps->s.f[0]), 22*sizeof(float), ptr_flag 6436 6437 // Track if the map information being generated is the first for a capture. 6438 bool IsCaptureFirstInfo = IsFirstComponentList; 6439 6440 // Scan the components from the base to the complete expression. 6441 auto CI = Components.rbegin(); 6442 auto CE = Components.rend(); 6443 auto I = CI; 6444 6445 // Track if the map information being generated is the first for a list of 6446 // components. 6447 bool IsExpressionFirstInfo = true; 6448 llvm::Value *BP = nullptr; 6449 6450 if (auto *ME = dyn_cast<MemberExpr>(I->getAssociatedExpression())) { 6451 // The base is the 'this' pointer. The content of the pointer is going 6452 // to be the base of the field being mapped. 6453 BP = CGF.EmitScalarExpr(ME->getBase()); 6454 } else { 6455 // The base is the reference to the variable. 6456 // BP = &Var. 6457 BP = CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer(); 6458 6459 // If the variable is a pointer and is being dereferenced (i.e. is not 6460 // the last component), the base has to be the pointer itself, not its 6461 // reference. 
References are ignored for mapping purposes. 6462 QualType Ty = 6463 I->getAssociatedDeclaration()->getType().getNonReferenceType(); 6464 if (Ty->isAnyPointerType() && std::next(I) != CE) { 6465 auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(BP, Ty); 6466 BP = CGF.EmitLoadOfPointerLValue(PtrAddr.getAddress(), 6467 Ty->castAs<PointerType>()) 6468 .getPointer(); 6469 6470 // We do not need to generate individual map information for the 6471 // pointer, it can be associated with the combined storage. 6472 ++I; 6473 } 6474 } 6475 6476 uint64_t DefaultFlags = IsImplicit ? OMP_MAP_IMPLICIT : 0; 6477 for (; I != CE; ++I) { 6478 auto Next = std::next(I); 6479 6480 // We need to generate the addresses and sizes if this is the last 6481 // component, if the component is a pointer or if it is an array section 6482 // whose length can't be proved to be one. If this is a pointer, it 6483 // becomes the base address for the following components. 6484 6485 // A final array section, is one whose length can't be proved to be one. 6486 bool IsFinalArraySection = 6487 isFinalArraySectionExpression(I->getAssociatedExpression()); 6488 6489 // Get information on whether the element is a pointer. Have to do a 6490 // special treatment for array sections given that they are built-in 6491 // types. 6492 const auto *OASE = 6493 dyn_cast<OMPArraySectionExpr>(I->getAssociatedExpression()); 6494 bool IsPointer = 6495 (OASE && 6496 OMPArraySectionExpr::getBaseOriginalType(OASE) 6497 .getCanonicalType() 6498 ->isAnyPointerType()) || 6499 I->getAssociatedExpression()->getType()->isAnyPointerType(); 6500 6501 if (Next == CE || IsPointer || IsFinalArraySection) { 6502 6503 // If this is not the last component, we expect the pointer to be 6504 // associated with an array expression or member expression. 
6505 assert((Next == CE || 6506 isa<MemberExpr>(Next->getAssociatedExpression()) || 6507 isa<ArraySubscriptExpr>(Next->getAssociatedExpression()) || 6508 isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) && 6509 "Unexpected expression"); 6510 6511 llvm::Value *LB = 6512 CGF.EmitOMPSharedLValue(I->getAssociatedExpression()).getPointer(); 6513 auto *Size = getExprTypeSize(I->getAssociatedExpression()); 6514 6515 // If we have a member expression and the current component is a 6516 // reference, we have to map the reference too. Whenever we have a 6517 // reference, the section that reference refers to is going to be a 6518 // load instruction from the storage assigned to the reference. 6519 if (isa<MemberExpr>(I->getAssociatedExpression()) && 6520 I->getAssociatedDeclaration()->getType()->isReferenceType()) { 6521 auto *LI = cast<llvm::LoadInst>(LB); 6522 auto *RefAddr = LI->getPointerOperand(); 6523 6524 BasePointers.push_back(BP); 6525 Pointers.push_back(RefAddr); 6526 Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy)); 6527 Types.push_back(DefaultFlags | 6528 getMapTypeBits( 6529 /*MapType*/ OMPC_MAP_alloc, 6530 /*MapTypeModifier=*/OMPC_MAP_unknown, 6531 !IsExpressionFirstInfo, IsCaptureFirstInfo)); 6532 IsExpressionFirstInfo = false; 6533 IsCaptureFirstInfo = false; 6534 // The reference will be the next base address. 6535 BP = RefAddr; 6536 } 6537 6538 BasePointers.push_back(BP); 6539 Pointers.push_back(LB); 6540 Sizes.push_back(Size); 6541 6542 // We need to add a pointer flag for each map that comes from the 6543 // same expression except for the first one. We also need to signal 6544 // this map is the first one that relates with the current capture 6545 // (there is a set of entries for each capture). 6546 Types.push_back(DefaultFlags | getMapTypeBits(MapType, MapTypeModifier, 6547 !IsExpressionFirstInfo, 6548 IsCaptureFirstInfo)); 6549 6550 // If we have a final array section, we are done with this expression. 
        if (IsFinalArraySection)
          break;

        // The pointer becomes the base for the next element.
        if (Next != CE)
          BP = LB;

        IsExpressionFirstInfo = false;
        IsCaptureFirstInfo = false;
      }
    }
  }

  /// \brief Return the adjusted map modifiers if the declaration a capture
  /// refers to appears in a first-private clause. This is expected to be used
  /// only with directives that start with 'target'.
  /// \param Cap Capture (must capture a variable by reference).
  /// \param CurrentModifiers Flags computed so far for this capture.
  /// \return The replacement flag set if the captured variable is known to be
  /// firstprivate or a reduction variable; otherwise \p CurrentModifiers
  /// unchanged.
  unsigned adjustMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap,
                                               unsigned CurrentModifiers) {
    assert(Cap.capturesVariable() && "Expected capture by reference only!");

    // A first private variable captured by reference will use only the
    // 'private ptr' and 'map to' flag. Return the right flags if the captured
    // declaration is known as first-private in this handler.
    if (FirstPrivateDecls.count(Cap.getCapturedVar()))
      return MappableExprsHandler::OMP_MAP_PRIVATE |
             MappableExprsHandler::OMP_MAP_TO;
    // Reduction variable will use only the 'private ptr' and 'map to_from'
    // flag.
    if (ReductionDecls.count(Cap.getCapturedVar())) {
      return MappableExprsHandler::OMP_MAP_TO |
             MappableExprsHandler::OMP_MAP_FROM;
    }

    // We didn't modify anything.
    return CurrentModifiers;
  }

public:
  /// Build a handler for the mappable expressions of directive \p Dir,
  /// emitting code through \p CGF. Collects the declarations named in
  /// firstprivate, reduction and is_device_ptr clauses up front so later
  /// queries are simple set lookups.
  MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
      : CurDir(Dir), CGF(CGF) {
    // Extract firstprivate clause information.
    // Keys are canonical declarations so lookups are insensitive to
    // redeclarations.
    for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
      for (const auto *D : C->varlists())
        FirstPrivateDecls.insert(
            cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
    for (const auto *C : Dir.getClausesOfKind<OMPReductionClause>()) {
      for (const auto *D : C->varlists()) {
        ReductionDecls.insert(
            cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
      }
    }
    // Extract device pointer clause information.
    for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
      for (auto L : C->component_lists())
        DevPointersMap[L.first].push_back(L.second);
  }

  /// \brief Generate all the base pointers, section pointers, sizes and map
  /// types for the extracted mappable expressions. Also, for each item that
  /// relates with a device pointer, a pair of the relevant declaration and
  /// index where it occurs is appended to the device pointers info array.
  /// All four output arrays are cleared first and are filled in lock-step
  /// (entry i of each array describes the same map operation).
  void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
                       MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
                       MapFlagsArrayTy &Types) const {
    BasePointers.clear();
    Pointers.clear();
    Sizes.clear();
    Types.clear();

    // Per-component-list record gathered from the map/to/from clauses before
    // any code is emitted.
    struct MapInfo {
      /// Kind that defines how a device pointer has to be returned.
      enum ReturnPointerKind {
        // Don't have to return any pointer.
        RPK_None,
        // Pointer is the base of the declaration.
        RPK_Base,
        // Pointer is a member of the base declaration - 'this'
        RPK_Member,
        // Pointer is a reference and a member of the base declaration - 'this'
        RPK_MemberReference,
      };
      OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
      OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
      OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
      ReturnPointerKind ReturnDevicePointer = RPK_None;
      bool IsImplicit = false;

      MapInfo() = default;
      MapInfo(
          OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
          OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
          ReturnPointerKind ReturnDevicePointer, bool IsImplicit)
          : Components(Components), MapType(MapType),
            MapTypeModifier(MapTypeModifier),
            ReturnDevicePointer(ReturnDevicePointer), IsImplicit(IsImplicit) {}
    };

    // We have to process the component lists that relate with the same
    // declaration in a single chunk so that we can generate the map flags
    // correctly. Therefore, we organize all lists in a map. A MapVector keeps
    // the clause order while still allowing lookup by declaration; the null
    // key collects component lists rooted at 'this'.
    llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;

    // Helper function to fill the information map for the different supported
    // clauses.
    auto &&InfoGen = [&Info](
        const ValueDecl *D,
        OMPClauseMappableExprCommon::MappableExprComponentListRef L,
        OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
        MapInfo::ReturnPointerKind ReturnDevicePointer, bool IsImplicit) {
      const ValueDecl *VD =
          D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
      Info[VD].emplace_back(L, MapType, MapModifier, ReturnDevicePointer,
                            IsImplicit);
    };

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
      for (auto L : C->component_lists()) {
        InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
                MapInfo::RPK_None, C->isImplicit());
      }
    // 'to' and 'from' clauses (target update) reuse the map machinery with a
    // fixed map type and no modifier.
    for (auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
      for (auto L : C->component_lists()) {
        InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
                MapInfo::RPK_None, C->isImplicit());
      }
    for (auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
      for (auto L : C->component_lists()) {
        InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
                MapInfo::RPK_None, C->isImplicit());
      }

    // Look at the use_device_ptr clause information and mark the existing map
    // entries as such. If there is no map information for an entry in the
    // use_device_ptr list, we create one with map type 'alloc' and zero size
    // section. It is the user fault if that was not mapped before.
    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (auto *C : this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>())
      for (auto L : C->component_lists()) {
        assert(!L.second.empty() && "Not expecting empty list of components!");
        const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
        VD = cast<ValueDecl>(VD->getCanonicalDecl());
        auto *IE = L.second.back().getAssociatedExpression();
        // If the first component is a member expression, we have to look into
        // 'this', which maps to null in the map of map information. Otherwise
        // look directly for the information.
        auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);

        // We potentially have map information for this declaration already.
        // Look for the first set of components that refer to it.
        if (It != Info.end()) {
          auto CI = std::find_if(
              It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
                return MI.Components.back().getAssociatedDeclaration() == VD;
              });
          // If we found a map entry, signal that the pointer has to be returned
          // and move on to the next declaration.
          if (CI != It->second.end()) {
            CI->ReturnDevicePointer = isa<MemberExpr>(IE)
                                          ? (VD->getType()->isReferenceType()
                                                 ? MapInfo::RPK_MemberReference
                                                 : MapInfo::RPK_Member)
                                          : MapInfo::RPK_Base;
            continue;
          }
        }

        // We didn't find any match in our map information - generate a zero
        // size array section.
        // FIXME: MSVC 2013 seems to require this-> to find member CGF.
        llvm::Value *Ptr =
            this->CGF
                .EmitLoadOfLValue(this->CGF.EmitLValue(IE), SourceLocation())
                .getScalarVal();
        BasePointers.push_back({Ptr, VD});
        Pointers.push_back(Ptr);
        Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
        Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_TARGET_PARAM);
      }

    for (auto &M : Info) {
      // We need to know when we generate information for the first component
      // associated with a capture, because the mapping flags depend on it.
      bool IsFirstComponentList = true;
      for (MapInfo &L : M.second) {
        assert(!L.Components.empty() &&
               "Not expecting declaration with no component lists.");

        // Remember the current base pointer index.
        unsigned CurrentBasePointersIdx = BasePointers.size();
        // FIXME: MSVC 2013 seems to require this-> to find the member method.
        this->generateInfoForComponentList(
            L.MapType, L.MapTypeModifier, L.Components, BasePointers, Pointers,
            Sizes, Types, IsFirstComponentList, L.IsImplicit);

        // If this entry relates with a device pointer, set the relevant
        // declaration and add the 'return pointer' flag.
        if (IsFirstComponentList &&
            L.ReturnDevicePointer != MapInfo::RPK_None) {
          // If the pointer is not the base of the map, we need to skip the
          // base. If it is a reference in a member field, we also need to skip
          // the map of the reference.
          if (L.ReturnDevicePointer != MapInfo::RPK_Base) {
            ++CurrentBasePointersIdx;
            if (L.ReturnDevicePointer == MapInfo::RPK_MemberReference)
              ++CurrentBasePointersIdx;
          }
          assert(BasePointers.size() > CurrentBasePointersIdx &&
                 "Unexpected number of mapped base pointers.");

          auto *RelevantVD = L.Components.back().getAssociatedDeclaration();
          assert(RelevantVD &&
                 "No relevant declaration related with device pointer??");

          BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
          Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
        }
        IsFirstComponentList = false;
      }
    }
  }

  /// \brief Generate the base pointers, section pointers, sizes and map types
  /// associated to a given capture. The four output arrays are cleared first
  /// and filled in lock-step.
  /// \param Cap The capture being processed (must not be a VLA-type capture).
  /// \param Arg The value already computed for this capture.
  void generateInfoForCapture(const CapturedStmt::Capture *Cap,
                              llvm::Value *Arg,
                              MapBaseValuesArrayTy &BasePointers,
                              MapValuesArrayTy &Pointers,
                              MapValuesArrayTy &Sizes,
                              MapFlagsArrayTy &Types) const {
    assert(!Cap->capturesVariableArrayType() &&
           "Not expecting to generate map info for a variable array type!");

    BasePointers.clear();
    Pointers.clear();
    Sizes.clear();
    Types.clear();

    // We need to know when we generating information for the first component
    // associated with a capture, because the mapping flags depend on it.
    bool IsFirstComponentList = true;

    // A null VD denotes the 'this' capture.
    const ValueDecl *VD =
        Cap->capturesThis()
            ? nullptr
            : cast<ValueDecl>(Cap->getCapturedVar()->getCanonicalDecl());

    // If this declaration appears in a is_device_ptr clause we just have to
    // pass the pointer by value. If it is a reference to a declaration, we just
    // pass its value, otherwise, if it is a member expression, we need to map
    // 'to' the field. Note the DevPointersMap lookup with a null VD: the
    // constructor stores is_device_ptr component lists for members of 'this'
    // under the null key.
    if (!VD) {
      auto It = DevPointersMap.find(VD);
      if (It != DevPointersMap.end()) {
        for (auto L : It->second) {
          generateInfoForComponentList(
              /*MapType=*/OMPC_MAP_to, /*MapTypeModifier=*/OMPC_MAP_unknown, L,
              BasePointers, Pointers, Sizes, Types, IsFirstComponentList,
              /*IsImplicit=*/false);
          IsFirstComponentList = false;
        }
        return;
      }
    } else if (DevPointersMap.count(VD)) {
      BasePointers.push_back({Arg, VD});
      Pointers.push_back(Arg);
      Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
      Types.push_back(OMP_MAP_LITERAL | OMP_MAP_TARGET_PARAM);
      return;
    }

    // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
    for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
      for (auto L : C->decl_component_lists(VD)) {
        assert(L.first == VD &&
               "We got information for the wrong declaration??");
        assert(!L.second.empty() &&
               "Not expecting declaration with no component lists.");
        generateInfoForComponentList(
            C->getMapType(), C->getMapTypeModifier(), L.second, BasePointers,
            Pointers, Sizes, Types, IsFirstComponentList, C->isImplicit());
        IsFirstComponentList = false;
      }

    return;
  }

  /// \brief Generate the default map information for a given capture \a CI,
  /// record field declaration \a RI and captured value \a CV. Appends exactly
  /// one entry to each of the four output arrays.
  void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
                              const FieldDecl &RI, llvm::Value *CV,
                              MapBaseValuesArrayTy &CurBasePointers,
                              MapValuesArrayTy &CurPointers,
                              MapValuesArrayTy &CurSizes,
                              MapFlagsArrayTy &CurMapTypes) {

    // Do the default mapping.
    if (CI.capturesThis()) {
      // 'this' is mapped as the pointee of the captured pointer.
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);
      const PointerType *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
      CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
      // Default map type.
      CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
    } else if (CI.capturesVariableByCopy()) {
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);
      if (!RI.getType()->isAnyPointerType()) {
        // We have to signal to the runtime captures passed by value that are
        // not pointers.
        CurMapTypes.push_back(OMP_MAP_LITERAL);
        CurSizes.push_back(CGF.getTypeSize(RI.getType()));
      } else {
        // Pointers are implicitly mapped with a zero size and no flags
        // (other than first map that is added for all implicit maps).
        CurMapTypes.push_back(0u);
        CurSizes.push_back(llvm::Constant::getNullValue(CGF.SizeTy));
      }
    } else {
      assert(CI.capturesVariable() && "Expected captured reference.");
      CurBasePointers.push_back(CV);
      CurPointers.push_back(CV);

      const ReferenceType *PtrTy =
          cast<ReferenceType>(RI.getType().getTypePtr());
      QualType ElementType = PtrTy->getPointeeType();
      CurSizes.push_back(CGF.getTypeSize(ElementType));
      // The default map type for a scalar/complex type is 'to' because by
      // default the value doesn't have to be retrieved. For an aggregate
      // type, the default is 'tofrom'.
      CurMapTypes.emplace_back(adjustMapModifiersForPrivateClauses(
          CI, ElementType->isAggregateType() ? (OMP_MAP_TO | OMP_MAP_FROM)
                                             : OMP_MAP_TO));
    }
    // Every default map produces a single argument which is a target parameter.
    CurMapTypes.back() |= OMP_MAP_TARGET_PARAM;
  }
};

enum OpenMPOffloadingReservedDeviceIDs {
  /// \brief Device ID if the device was not defined, runtime should get it
  /// from environment variables in the spec.
  OMP_DEVICEID_UNDEF = -1,
};
} // anonymous namespace

/// \brief Emit the arrays used to pass the captures and map information to the
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
/// On success \p Info holds the (stack or global) arrays describing every map
/// entry; sizes become a constant global when all of them are compile-time
/// constants, otherwise a runtime-filled stack array.
static void
emitOffloadingArrays(CodeGenFunction &CGF,
                     MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
                     MappableExprsHandler::MapValuesArrayTy &Pointers,
                     MappableExprsHandler::MapValuesArrayTy &Sizes,
                     MappableExprsHandler::MapFlagsArrayTy &MapTypes,
                     CGOpenMPRuntime::TargetDataInfo &Info) {
  auto &CGM = CGF.CGM;
  auto &Ctx = CGF.getContext();

  // Reset the array information.
  Info.clearArrayInfo();
  Info.NumberOfPtrs = BasePointers.size();

  if (Info.NumberOfPtrs) {
    // Detect if we have any capture size requiring runtime evaluation of the
    // size so that a constant array could be eventually used.
    bool hasRuntimeEvaluationCaptureSize = false;
    for (auto *S : Sizes)
      if (!isa<llvm::Constant>(S)) {
        hasRuntimeEvaluationCaptureSize = true;
        break;
      }

    llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
    QualType PointerArrayType =
        Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
                                 /*IndexTypeQuals=*/0);

    Info.BasePointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
    Info.PointersArray =
        CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();

    // If we don't have any VLA types or other types that require runtime
    // evaluation, we can use a constant array for the map sizes, otherwise we
    // need to fill up the arrays as we do for the pointers.
    if (hasRuntimeEvaluationCaptureSize) {
      QualType SizeArrayType = Ctx.getConstantArrayType(
          Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
          /*IndexTypeQuals=*/0);
      Info.SizesArray =
          CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
    } else {
      // We expect all the sizes to be constant, so we collect them to create
      // a constant array.
      SmallVector<llvm::Constant *, 16> ConstSizes;
      for (auto S : Sizes)
        ConstSizes.push_back(cast<llvm::Constant>(S));

      auto *SizesArrayInit = llvm::ConstantArray::get(
          llvm::ArrayType::get(CGM.SizeTy, ConstSizes.size()), ConstSizes);
      auto *SizesArrayGbl = new llvm::GlobalVariable(
          CGM.getModule(), SizesArrayInit->getType(),
          /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
          SizesArrayInit, ".offload_sizes");
      SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
      Info.SizesArray = SizesArrayGbl;
    }

    // The map types are always constant so we don't need to generate code to
    // fill arrays. Instead, we create an array constant.
    llvm::Constant *MapTypesArrayInit =
        llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
    auto *MapTypesArrayGbl = new llvm::GlobalVariable(
        CGM.getModule(), MapTypesArrayInit->getType(),
        /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
        MapTypesArrayInit, ".offload_maptypes");
    MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    Info.MapTypesArray = MapTypesArrayGbl;

    // Store each base pointer, pointer and (when not constant) size into its
    // slot of the freshly created arrays.
    for (unsigned i = 0; i < Info.NumberOfPtrs; ++i) {
      llvm::Value *BPVal = *BasePointers[i];
      llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.BasePointersArray, 0, i);
      // Cast the i8** slot to the stored value's own pointer type before the
      // store.
      BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(BPVal, BPAddr);

      // Record where a device-pointer declaration's address was stored so the
      // caller can retrieve it after the runtime call.
      if (Info.requiresDevicePointerInfo())
        if (auto *DevVD = BasePointers[i].getDevicePtrDecl())
          Info.CaptureDeviceAddrMap.insert(std::make_pair(DevVD, BPAddr));

      llvm::Value *PVal = Pointers[i];
      llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
          llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
          Info.PointersArray, 0, i);
      P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
          P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
      Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
      CGF.Builder.CreateStore(PVal, PAddr);

      if (hasRuntimeEvaluationCaptureSize) {
        llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
            llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
            Info.SizesArray,
            /*Idx0=*/0,
            /*Idx1=*/i);
        Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
        CGF.Builder.CreateStore(
            CGF.Builder.CreateIntCast(Sizes[i], CGM.SizeTy, /*isSigned=*/true),
            SAddr);
      }
    }
  }
}
/// \brief Emit the arguments to be passed to the runtime library based on the
/// arrays of pointers, sizes and map types. Decays each array recorded in
/// \p Info to a pointer to its first element; if no pointers were recorded,
/// null pointers of the matching types are emitted instead.
static void emitOffloadingArraysArgument(
    CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
    llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
    llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
  auto &CGM = CGF.CGM;
  if (Info.NumberOfPtrs) {
    BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.BasePointersArray,
        /*Idx0=*/0, /*Idx1=*/0);
    PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
        Info.PointersArray,
        /*Idx0=*/0,
        /*Idx1=*/0);
    SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
        /*Idx0=*/0, /*Idx1=*/0);
    MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
        llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
        Info.MapTypesArray,
        /*Idx0=*/0,
        /*Idx1=*/0);
  } else {
    BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
    SizesArrayArg = llvm::ConstantPointerNull::get(CGM.SizeTy->getPointerTo());
    MapTypesArrayArg =
        llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
  }
}

void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     llvm::Value *OutlinedFn,
                                     llvm::Value *OutlinedFnID,
                                     const Expr *IfCond, const Expr *Device) {
  if (!CGF.HaveInsertPoint())
    return;

  assert(OutlinedFn && "Invalid outlined function!");

  const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>();
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  const CapturedStmt &CS =
*D.getCapturedStmt(OMPD_target); 7055 auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF, 7056 PrePostActionTy &) { 7057 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars); 7058 }; 7059 emitInlinedDirective(CGF, OMPD_unknown, ArgsCodegen); 7060 7061 CodeGenFunction::OMPTargetDataInfo InputInfo; 7062 llvm::Value *MapTypesArray = nullptr; 7063 // Fill up the pointer arrays and transfer execution to the device. 7064 auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo, 7065 &MapTypesArray, &CS, RequiresOuterTask, 7066 &CapturedVars](CodeGenFunction &CGF, PrePostActionTy &) { 7067 // On top of the arrays that were filled up, the target offloading call 7068 // takes as arguments the device id as well as the host pointer. The host 7069 // pointer is used by the runtime library to identify the current target 7070 // region, so it only has to be unique and not necessarily point to 7071 // anything. It could be the pointer to the outlined function that 7072 // implements the target region, but we aren't using that so that the 7073 // compiler doesn't need to keep that, and could therefore inline the host 7074 // function if proven worthwhile during optimization. 7075 7076 // From this point on, we need to have an ID of the target region defined. 7077 assert(OutlinedFnID && "Invalid outlined function ID!"); 7078 7079 // Emit device ID if any. 7080 llvm::Value *DeviceID; 7081 if (Device) { 7082 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device), 7083 CGF.Int64Ty, /*isSigned=*/true); 7084 } else { 7085 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF); 7086 } 7087 7088 // Emit the number of elements in the offloading arrays. 7089 llvm::Value *PointerNum = 7090 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems); 7091 7092 // Return value of the runtime offloading call. 
7093 llvm::Value *Return; 7094 7095 auto *NumTeams = emitNumTeamsForTargetDirective(*this, CGF, D); 7096 auto *NumThreads = emitNumThreadsForTargetDirective(*this, CGF, D); 7097 7098 bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>(); 7099 // The target region is an outlined function launched by the runtime 7100 // via calls __tgt_target() or __tgt_target_teams(). 7101 // 7102 // __tgt_target() launches a target region with one team and one thread, 7103 // executing a serial region. This master thread may in turn launch 7104 // more threads within its team upon encountering a parallel region, 7105 // however, no additional teams can be launched on the device. 7106 // 7107 // __tgt_target_teams() launches a target region with one or more teams, 7108 // each with one or more threads. This call is required for target 7109 // constructs such as: 7110 // 'target teams' 7111 // 'target' / 'teams' 7112 // 'target teams distribute parallel for' 7113 // 'target parallel' 7114 // and so on. 7115 // 7116 // Note that on the host and CPU targets, the runtime implementation of 7117 // these calls simply call the outlined function without forking threads. 7118 // The outlined functions themselves have runtime calls to 7119 // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by 7120 // the compiler in emitTeamsCall() and emitParallelCall(). 7121 // 7122 // In contrast, on the NVPTX target, the implementation of 7123 // __tgt_target_teams() launches a GPU kernel with the requested number 7124 // of teams and threads so no additional calls to the runtime are required. 7125 if (NumTeams) { 7126 // If we have NumTeams defined this means that we have an enclosed teams 7127 // region. Therefore we also expect to have NumThreads defined. These two 7128 // values should be defined in the presence of a teams directive, 7129 // regardless of having any clauses associated. 
If the user is using teams 7130 // but no clauses, these two values will be the default that should be 7131 // passed to the runtime library - a 32-bit integer with the value zero. 7132 assert(NumThreads && "Thread limit expression should be available along " 7133 "with number of teams."); 7134 llvm::Value *OffloadingArgs[] = {DeviceID, 7135 OutlinedFnID, 7136 PointerNum, 7137 InputInfo.BasePointersArray.getPointer(), 7138 InputInfo.PointersArray.getPointer(), 7139 InputInfo.SizesArray.getPointer(), 7140 MapTypesArray, 7141 NumTeams, 7142 NumThreads}; 7143 Return = CGF.EmitRuntimeCall( 7144 createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_teams_nowait 7145 : OMPRTL__tgt_target_teams), 7146 OffloadingArgs); 7147 } else { 7148 llvm::Value *OffloadingArgs[] = {DeviceID, 7149 OutlinedFnID, 7150 PointerNum, 7151 InputInfo.BasePointersArray.getPointer(), 7152 InputInfo.PointersArray.getPointer(), 7153 InputInfo.SizesArray.getPointer(), 7154 MapTypesArray}; 7155 Return = CGF.EmitRuntimeCall( 7156 createRuntimeFunction(HasNowait ? OMPRTL__tgt_target_nowait 7157 : OMPRTL__tgt_target), 7158 OffloadingArgs); 7159 } 7160 7161 // Check the error code and execute the host version if required. 7162 llvm::BasicBlock *OffloadFailedBlock = 7163 CGF.createBasicBlock("omp_offload.failed"); 7164 llvm::BasicBlock *OffloadContBlock = 7165 CGF.createBasicBlock("omp_offload.cont"); 7166 llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return); 7167 CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock); 7168 7169 CGF.EmitBlock(OffloadFailedBlock); 7170 if (RequiresOuterTask) { 7171 CapturedVars.clear(); 7172 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars); 7173 } 7174 emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars); 7175 CGF.EmitBranch(OffloadContBlock); 7176 7177 CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true); 7178 }; 7179 7180 // Notify that the host version must be executed. 
7181 auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars, 7182 RequiresOuterTask](CodeGenFunction &CGF, 7183 PrePostActionTy &) { 7184 if (RequiresOuterTask) { 7185 CapturedVars.clear(); 7186 CGF.GenerateOpenMPCapturedVars(CS, CapturedVars); 7187 } 7188 emitOutlinedFunctionCall(CGF, D.getLocStart(), OutlinedFn, CapturedVars); 7189 }; 7190 7191 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray, 7192 &CapturedVars, RequiresOuterTask, 7193 &CS](CodeGenFunction &CGF, PrePostActionTy &) { 7194 // Fill up the arrays with all the captured variables. 7195 MappableExprsHandler::MapBaseValuesArrayTy BasePointers; 7196 MappableExprsHandler::MapValuesArrayTy Pointers; 7197 MappableExprsHandler::MapValuesArrayTy Sizes; 7198 MappableExprsHandler::MapFlagsArrayTy MapTypes; 7199 7200 MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers; 7201 MappableExprsHandler::MapValuesArrayTy CurPointers; 7202 MappableExprsHandler::MapValuesArrayTy CurSizes; 7203 MappableExprsHandler::MapFlagsArrayTy CurMapTypes; 7204 7205 // Get mappable expression information. 7206 MappableExprsHandler MEHandler(D, CGF); 7207 7208 auto RI = CS.getCapturedRecordDecl()->field_begin(); 7209 auto CV = CapturedVars.begin(); 7210 for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(), 7211 CE = CS.capture_end(); 7212 CI != CE; ++CI, ++RI, ++CV) { 7213 CurBasePointers.clear(); 7214 CurPointers.clear(); 7215 CurSizes.clear(); 7216 CurMapTypes.clear(); 7217 7218 // VLA sizes are passed to the outlined region by copy and do not have map 7219 // information associated. 7220 if (CI->capturesVariableArrayType()) { 7221 CurBasePointers.push_back(*CV); 7222 CurPointers.push_back(*CV); 7223 CurSizes.push_back(CGF.getTypeSize(RI->getType())); 7224 // Copy to the device as an argument. No need to retrieve it. 
7225 CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_LITERAL | 7226 MappableExprsHandler::OMP_MAP_TARGET_PARAM); 7227 } else { 7228 // If we have any information in the map clause, we use it, otherwise we 7229 // just do a default mapping. 7230 MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers, 7231 CurSizes, CurMapTypes); 7232 if (CurBasePointers.empty()) 7233 MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers, 7234 CurPointers, CurSizes, CurMapTypes); 7235 } 7236 // We expect to have at least an element of information for this capture. 7237 assert(!CurBasePointers.empty() && 7238 "Non-existing map pointer for capture!"); 7239 assert(CurBasePointers.size() == CurPointers.size() && 7240 CurBasePointers.size() == CurSizes.size() && 7241 CurBasePointers.size() == CurMapTypes.size() && 7242 "Inconsistent map information sizes!"); 7243 7244 // We need to append the results of this capture to what we already have. 7245 BasePointers.append(CurBasePointers.begin(), CurBasePointers.end()); 7246 Pointers.append(CurPointers.begin(), CurPointers.end()); 7247 Sizes.append(CurSizes.begin(), CurSizes.end()); 7248 MapTypes.append(CurMapTypes.begin(), CurMapTypes.end()); 7249 } 7250 7251 TargetDataInfo Info; 7252 // Fill up the arrays and create the arguments. 
7253 emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info); 7254 emitOffloadingArraysArgument(CGF, Info.BasePointersArray, 7255 Info.PointersArray, Info.SizesArray, 7256 Info.MapTypesArray, Info); 7257 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs; 7258 InputInfo.BasePointersArray = 7259 Address(Info.BasePointersArray, CGM.getPointerAlign()); 7260 InputInfo.PointersArray = 7261 Address(Info.PointersArray, CGM.getPointerAlign()); 7262 InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign()); 7263 MapTypesArray = Info.MapTypesArray; 7264 if (RequiresOuterTask) 7265 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo); 7266 else 7267 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen); 7268 }; 7269 7270 auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask]( 7271 CodeGenFunction &CGF, PrePostActionTy &) { 7272 if (RequiresOuterTask) { 7273 CodeGenFunction::OMPTargetDataInfo InputInfo; 7274 CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo); 7275 } else { 7276 emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen); 7277 } 7278 }; 7279 7280 // If we have a target function ID it means that we need to support 7281 // offloading, otherwise, just execute on the host. We need to execute on host 7282 // regardless of the conditional in the if clause if, e.g., the user do not 7283 // specify target triples. 7284 if (OutlinedFnID) { 7285 if (IfCond) { 7286 emitOMPIfClause(CGF, IfCond, TargetThenGen, TargetElseGen); 7287 } else { 7288 RegionCodeGenTy ThenRCG(TargetThenGen); 7289 ThenRCG(CGF); 7290 } 7291 } else { 7292 RegionCodeGenTy ElseRCG(TargetElseGen); 7293 ElseRCG(CGF); 7294 } 7295 } 7296 7297 void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S, 7298 StringRef ParentName) { 7299 if (!S) 7300 return; 7301 7302 // Codegen OMP target directives that offload compute to the device. 
7303 bool requiresDeviceCodegen = 7304 isa<OMPExecutableDirective>(S) && 7305 isOpenMPTargetExecutionDirective( 7306 cast<OMPExecutableDirective>(S)->getDirectiveKind()); 7307 7308 if (requiresDeviceCodegen) { 7309 auto &E = *cast<OMPExecutableDirective>(S); 7310 unsigned DeviceID; 7311 unsigned FileID; 7312 unsigned Line; 7313 getTargetEntryUniqueInfo(CGM.getContext(), E.getLocStart(), DeviceID, 7314 FileID, Line); 7315 7316 // Is this a target region that should not be emitted as an entry point? If 7317 // so just signal we are done with this target region. 7318 if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID, 7319 ParentName, Line)) 7320 return; 7321 7322 switch (S->getStmtClass()) { 7323 case Stmt::OMPTargetDirectiveClass: 7324 CodeGenFunction::EmitOMPTargetDeviceFunction( 7325 CGM, ParentName, cast<OMPTargetDirective>(*S)); 7326 break; 7327 case Stmt::OMPTargetParallelDirectiveClass: 7328 CodeGenFunction::EmitOMPTargetParallelDeviceFunction( 7329 CGM, ParentName, cast<OMPTargetParallelDirective>(*S)); 7330 break; 7331 case Stmt::OMPTargetTeamsDirectiveClass: 7332 CodeGenFunction::EmitOMPTargetTeamsDeviceFunction( 7333 CGM, ParentName, cast<OMPTargetTeamsDirective>(*S)); 7334 break; 7335 case Stmt::OMPTargetTeamsDistributeDirectiveClass: 7336 CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction( 7337 CGM, ParentName, cast<OMPTargetTeamsDistributeDirective>(*S)); 7338 break; 7339 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass: 7340 CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction( 7341 CGM, ParentName, cast<OMPTargetTeamsDistributeSimdDirective>(*S)); 7342 break; 7343 case Stmt::OMPTargetParallelForDirectiveClass: 7344 CodeGenFunction::EmitOMPTargetParallelForDeviceFunction( 7345 CGM, ParentName, cast<OMPTargetParallelForDirective>(*S)); 7346 break; 7347 case Stmt::OMPTargetParallelForSimdDirectiveClass: 7348 CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction( 7349 CGM, ParentName, 
cast<OMPTargetParallelForSimdDirective>(*S)); 7350 break; 7351 case Stmt::OMPTargetSimdDirectiveClass: 7352 CodeGenFunction::EmitOMPTargetSimdDeviceFunction( 7353 CGM, ParentName, cast<OMPTargetSimdDirective>(*S)); 7354 break; 7355 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass: 7356 CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction( 7357 CGM, ParentName, 7358 cast<OMPTargetTeamsDistributeParallelForDirective>(*S)); 7359 break; 7360 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass: 7361 CodeGenFunction:: 7362 EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction( 7363 CGM, ParentName, 7364 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S)); 7365 break; 7366 default: 7367 llvm_unreachable("Unknown target directive for OpenMP device codegen."); 7368 } 7369 return; 7370 } 7371 7372 if (const OMPExecutableDirective *E = dyn_cast<OMPExecutableDirective>(S)) { 7373 if (!E->hasAssociatedStmt() || !E->getAssociatedStmt()) 7374 return; 7375 7376 scanForTargetRegionsFunctions( 7377 E->getInnermostCapturedStmt()->getCapturedStmt(), ParentName); 7378 return; 7379 } 7380 7381 // If this is a lambda function, look into its body. 7382 if (auto *L = dyn_cast<LambdaExpr>(S)) 7383 S = L->getBody(); 7384 7385 // Keep looking for target regions recursively. 7386 for (auto *II : S->children()) 7387 scanForTargetRegionsFunctions(II, ParentName); 7388 } 7389 7390 bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) { 7391 auto &FD = *cast<FunctionDecl>(GD.getDecl()); 7392 7393 // If emitting code for the host, we do not process FD here. Instead we do 7394 // the normal code generation. 7395 if (!CGM.getLangOpts().OpenMPIsDevice) 7396 return false; 7397 7398 // Try to detect target regions in the function. 7399 scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD)); 7400 7401 // We should not emit any function other that the ones created during the 7402 // scanning. 
Therefore, we signal that this function is completely dealt 7403 // with. 7404 return true; 7405 } 7406 7407 bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) { 7408 if (!CGM.getLangOpts().OpenMPIsDevice) 7409 return false; 7410 7411 // Check if there are Ctors/Dtors in this declaration and look for target 7412 // regions in it. We use the complete variant to produce the kernel name 7413 // mangling. 7414 QualType RDTy = cast<VarDecl>(GD.getDecl())->getType(); 7415 if (auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) { 7416 for (auto *Ctor : RD->ctors()) { 7417 StringRef ParentName = 7418 CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete)); 7419 scanForTargetRegionsFunctions(Ctor->getBody(), ParentName); 7420 } 7421 auto *Dtor = RD->getDestructor(); 7422 if (Dtor) { 7423 StringRef ParentName = 7424 CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete)); 7425 scanForTargetRegionsFunctions(Dtor->getBody(), ParentName); 7426 } 7427 } 7428 7429 // If we are in target mode, we do not emit any global (declare target is not 7430 // implemented yet). Therefore we signal that GD was processed in this case. 7431 return true; 7432 } 7433 7434 bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) { 7435 auto *VD = GD.getDecl(); 7436 if (isa<FunctionDecl>(VD)) 7437 return emitTargetFunctions(GD); 7438 7439 return emitTargetGlobalVariable(GD); 7440 } 7441 7442 llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() { 7443 // If we have offloading in the current module, we need to emit the entries 7444 // now and register the offloading descriptor. 7445 createOffloadEntriesAndInfoMetadata(); 7446 7447 // Create and register the offloading binary descriptors. This is the main 7448 // entity that captures all the information about offloading in the current 7449 // compilation unit. 
7450 return createOffloadingBinaryDescriptorRegistration(); 7451 } 7452 7453 void CGOpenMPRuntime::emitTeamsCall(CodeGenFunction &CGF, 7454 const OMPExecutableDirective &D, 7455 SourceLocation Loc, 7456 llvm::Value *OutlinedFn, 7457 ArrayRef<llvm::Value *> CapturedVars) { 7458 if (!CGF.HaveInsertPoint()) 7459 return; 7460 7461 auto *RTLoc = emitUpdateLocation(CGF, Loc); 7462 CodeGenFunction::RunCleanupsScope Scope(CGF); 7463 7464 // Build call __kmpc_fork_teams(loc, n, microtask, var1, .., varn); 7465 llvm::Value *Args[] = { 7466 RTLoc, 7467 CGF.Builder.getInt32(CapturedVars.size()), // Number of captured vars 7468 CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy())}; 7469 llvm::SmallVector<llvm::Value *, 16> RealArgs; 7470 RealArgs.append(std::begin(Args), std::end(Args)); 7471 RealArgs.append(CapturedVars.begin(), CapturedVars.end()); 7472 7473 auto RTLFn = createRuntimeFunction(OMPRTL__kmpc_fork_teams); 7474 CGF.EmitRuntimeCall(RTLFn, RealArgs); 7475 } 7476 7477 void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF, 7478 const Expr *NumTeams, 7479 const Expr *ThreadLimit, 7480 SourceLocation Loc) { 7481 if (!CGF.HaveInsertPoint()) 7482 return; 7483 7484 auto *RTLoc = emitUpdateLocation(CGF, Loc); 7485 7486 llvm::Value *NumTeamsVal = 7487 (NumTeams) 7488 ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(NumTeams), 7489 CGF.CGM.Int32Ty, /* isSigned = */ true) 7490 : CGF.Builder.getInt32(0); 7491 7492 llvm::Value *ThreadLimitVal = 7493 (ThreadLimit) 7494 ? 
CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit), 7495 CGF.CGM.Int32Ty, /* isSigned = */ true) 7496 : CGF.Builder.getInt32(0); 7497 7498 // Build call __kmpc_push_num_teamss(&loc, global_tid, num_teams, thread_limit) 7499 llvm::Value *PushNumTeamsArgs[] = {RTLoc, getThreadID(CGF, Loc), NumTeamsVal, 7500 ThreadLimitVal}; 7501 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__kmpc_push_num_teams), 7502 PushNumTeamsArgs); 7503 } 7504 7505 void CGOpenMPRuntime::emitTargetDataCalls( 7506 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, 7507 const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) { 7508 if (!CGF.HaveInsertPoint()) 7509 return; 7510 7511 // Action used to replace the default codegen action and turn privatization 7512 // off. 7513 PrePostActionTy NoPrivAction; 7514 7515 // Generate the code for the opening of the data environment. Capture all the 7516 // arguments of the runtime call by reference because they are used in the 7517 // closing of the region. 7518 auto &&BeginThenGen = [this, &D, Device, &Info, 7519 &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) { 7520 // Fill up the arrays with all the mapped variables. 7521 MappableExprsHandler::MapBaseValuesArrayTy BasePointers; 7522 MappableExprsHandler::MapValuesArrayTy Pointers; 7523 MappableExprsHandler::MapValuesArrayTy Sizes; 7524 MappableExprsHandler::MapFlagsArrayTy MapTypes; 7525 7526 // Get map clause information. 7527 MappableExprsHandler MCHandler(D, CGF); 7528 MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes); 7529 7530 // Fill up the arrays and create the arguments. 
7531 emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info); 7532 7533 llvm::Value *BasePointersArrayArg = nullptr; 7534 llvm::Value *PointersArrayArg = nullptr; 7535 llvm::Value *SizesArrayArg = nullptr; 7536 llvm::Value *MapTypesArrayArg = nullptr; 7537 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg, 7538 SizesArrayArg, MapTypesArrayArg, Info); 7539 7540 // Emit device ID if any. 7541 llvm::Value *DeviceID = nullptr; 7542 if (Device) { 7543 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device), 7544 CGF.Int64Ty, /*isSigned=*/true); 7545 } else { 7546 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF); 7547 } 7548 7549 // Emit the number of elements in the offloading arrays. 7550 auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs); 7551 7552 llvm::Value *OffloadingArgs[] = { 7553 DeviceID, PointerNum, BasePointersArrayArg, 7554 PointersArrayArg, SizesArrayArg, MapTypesArrayArg}; 7555 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_begin), 7556 OffloadingArgs); 7557 7558 // If device pointer privatization is required, emit the body of the region 7559 // here. It will have to be duplicated: with and without privatization. 7560 if (!Info.CaptureDeviceAddrMap.empty()) 7561 CodeGen(CGF); 7562 }; 7563 7564 // Generate code for the closing of the data region. 7565 auto &&EndThenGen = [this, Device, &Info](CodeGenFunction &CGF, 7566 PrePostActionTy &) { 7567 assert(Info.isValid() && "Invalid data environment closing arguments."); 7568 7569 llvm::Value *BasePointersArrayArg = nullptr; 7570 llvm::Value *PointersArrayArg = nullptr; 7571 llvm::Value *SizesArrayArg = nullptr; 7572 llvm::Value *MapTypesArrayArg = nullptr; 7573 emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg, 7574 SizesArrayArg, MapTypesArrayArg, Info); 7575 7576 // Emit device ID if any. 
7577 llvm::Value *DeviceID = nullptr; 7578 if (Device) { 7579 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device), 7580 CGF.Int64Ty, /*isSigned=*/true); 7581 } else { 7582 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF); 7583 } 7584 7585 // Emit the number of elements in the offloading arrays. 7586 auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs); 7587 7588 llvm::Value *OffloadingArgs[] = { 7589 DeviceID, PointerNum, BasePointersArrayArg, 7590 PointersArrayArg, SizesArrayArg, MapTypesArrayArg}; 7591 CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target_data_end), 7592 OffloadingArgs); 7593 }; 7594 7595 // If we need device pointer privatization, we need to emit the body of the 7596 // region with no privatization in the 'else' branch of the conditional. 7597 // Otherwise, we don't have to do anything. 7598 auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF, 7599 PrePostActionTy &) { 7600 if (!Info.CaptureDeviceAddrMap.empty()) { 7601 CodeGen.setAction(NoPrivAction); 7602 CodeGen(CGF); 7603 } 7604 }; 7605 7606 // We don't have to do anything to close the region if the if clause evaluates 7607 // to false. 7608 auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {}; 7609 7610 if (IfCond) { 7611 emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen); 7612 } else { 7613 RegionCodeGenTy RCG(BeginThenGen); 7614 RCG(CGF); 7615 } 7616 7617 // If we don't require privatization of device pointers, we emit the body in 7618 // between the runtime calls. This avoids duplicating the body code. 
7619 if (Info.CaptureDeviceAddrMap.empty()) { 7620 CodeGen.setAction(NoPrivAction); 7621 CodeGen(CGF); 7622 } 7623 7624 if (IfCond) { 7625 emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen); 7626 } else { 7627 RegionCodeGenTy RCG(EndThenGen); 7628 RCG(CGF); 7629 } 7630 } 7631 7632 void CGOpenMPRuntime::emitTargetDataStandAloneCall( 7633 CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, 7634 const Expr *Device) { 7635 if (!CGF.HaveInsertPoint()) 7636 return; 7637 7638 assert((isa<OMPTargetEnterDataDirective>(D) || 7639 isa<OMPTargetExitDataDirective>(D) || 7640 isa<OMPTargetUpdateDirective>(D)) && 7641 "Expecting either target enter, exit data, or update directives."); 7642 7643 CodeGenFunction::OMPTargetDataInfo InputInfo; 7644 llvm::Value *MapTypesArray = nullptr; 7645 // Generate the code for the opening of the data environment. 7646 auto &&ThenGen = [this, &D, Device, &InputInfo, 7647 &MapTypesArray](CodeGenFunction &CGF, PrePostActionTy &) { 7648 // Emit device ID if any. 7649 llvm::Value *DeviceID = nullptr; 7650 if (Device) { 7651 DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device), 7652 CGF.Int64Ty, /*isSigned=*/true); 7653 } else { 7654 DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF); 7655 } 7656 7657 // Emit the number of elements in the offloading arrays. 7658 llvm::Constant *PointerNum = 7659 CGF.Builder.getInt32(InputInfo.NumberOfTargetItems); 7660 7661 llvm::Value *OffloadingArgs[] = {DeviceID, 7662 PointerNum, 7663 InputInfo.BasePointersArray.getPointer(), 7664 InputInfo.PointersArray.getPointer(), 7665 InputInfo.SizesArray.getPointer(), 7666 MapTypesArray}; 7667 7668 // Select the right runtime function call for each expected standalone 7669 // directive. 
7670 const bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>(); 7671 OpenMPRTLFunction RTLFn; 7672 switch (D.getDirectiveKind()) { 7673 default: 7674 llvm_unreachable("Unexpected standalone target data directive."); 7675 break; 7676 case OMPD_target_enter_data: 7677 RTLFn = HasNowait ? OMPRTL__tgt_target_data_begin_nowait 7678 : OMPRTL__tgt_target_data_begin; 7679 break; 7680 case OMPD_target_exit_data: 7681 RTLFn = HasNowait ? OMPRTL__tgt_target_data_end_nowait 7682 : OMPRTL__tgt_target_data_end; 7683 break; 7684 case OMPD_target_update: 7685 RTLFn = HasNowait ? OMPRTL__tgt_target_data_update_nowait 7686 : OMPRTL__tgt_target_data_update; 7687 break; 7688 } 7689 CGF.EmitRuntimeCall(createRuntimeFunction(RTLFn), OffloadingArgs); 7690 }; 7691 7692 auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray]( 7693 CodeGenFunction &CGF, PrePostActionTy &) { 7694 // Fill up the arrays with all the mapped variables. 7695 MappableExprsHandler::MapBaseValuesArrayTy BasePointers; 7696 MappableExprsHandler::MapValuesArrayTy Pointers; 7697 MappableExprsHandler::MapValuesArrayTy Sizes; 7698 MappableExprsHandler::MapFlagsArrayTy MapTypes; 7699 7700 // Get map clause information. 7701 MappableExprsHandler MEHandler(D, CGF); 7702 MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes); 7703 7704 TargetDataInfo Info; 7705 // Fill up the arrays and create the arguments. 
7706 emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info); 7707 emitOffloadingArraysArgument(CGF, Info.BasePointersArray, 7708 Info.PointersArray, Info.SizesArray, 7709 Info.MapTypesArray, Info); 7710 InputInfo.NumberOfTargetItems = Info.NumberOfPtrs; 7711 InputInfo.BasePointersArray = 7712 Address(Info.BasePointersArray, CGM.getPointerAlign()); 7713 InputInfo.PointersArray = 7714 Address(Info.PointersArray, CGM.getPointerAlign()); 7715 InputInfo.SizesArray = 7716 Address(Info.SizesArray, CGM.getPointerAlign()); 7717 MapTypesArray = Info.MapTypesArray; 7718 if (D.hasClausesOfKind<OMPDependClause>()) 7719 CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo); 7720 else 7721 emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen); 7722 }; 7723 7724 if (IfCond) 7725 emitOMPIfClause(CGF, IfCond, TargetThenGen, 7726 [](CodeGenFunction &CGF, PrePostActionTy &) {}); 7727 else { 7728 RegionCodeGenTy ThenRCG(TargetThenGen); 7729 ThenRCG(CGF); 7730 } 7731 } 7732 7733 namespace { 7734 /// Kind of parameter in a function with 'declare simd' directive. 7735 enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector }; 7736 /// Attribute set of the parameter. 7737 struct ParamAttrTy { 7738 ParamKindTy Kind = Vector; 7739 llvm::APSInt StrideOrArg; 7740 llvm::APSInt Alignment; 7741 }; 7742 } // namespace 7743 7744 static unsigned evaluateCDTSize(const FunctionDecl *FD, 7745 ArrayRef<ParamAttrTy> ParamAttrs) { 7746 // Every vector variant of a SIMD-enabled function has a vector length (VLEN). 7747 // If OpenMP clause "simdlen" is used, the VLEN is the value of the argument 7748 // of that clause. The VLEN value must be power of 2. 7749 // In other case the notion of the function`s "characteristic data type" (CDT) 7750 // is used to compute the vector length. 7751 // CDT is defined in the following order: 7752 // a) For non-void function, the CDT is the return type. 
7753 // b) If the function has any non-uniform, non-linear parameters, then the 7754 // CDT is the type of the first such parameter. 7755 // c) If the CDT determined by a) or b) above is struct, union, or class 7756 // type which is pass-by-value (except for the type that maps to the 7757 // built-in complex data type), the characteristic data type is int. 7758 // d) If none of the above three cases is applicable, the CDT is int. 7759 // The VLEN is then determined based on the CDT and the size of vector 7760 // register of that ISA for which current vector version is generated. The 7761 // VLEN is computed using the formula below: 7762 // VLEN = sizeof(vector_register) / sizeof(CDT), 7763 // where vector register size specified in section 3.2.1 Registers and the 7764 // Stack Frame of original AMD64 ABI document. 7765 QualType RetType = FD->getReturnType(); 7766 if (RetType.isNull()) 7767 return 0; 7768 ASTContext &C = FD->getASTContext(); 7769 QualType CDT; 7770 if (!RetType.isNull() && !RetType->isVoidType()) 7771 CDT = RetType; 7772 else { 7773 unsigned Offset = 0; 7774 if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 7775 if (ParamAttrs[Offset].Kind == Vector) 7776 CDT = C.getPointerType(C.getRecordType(MD->getParent())); 7777 ++Offset; 7778 } 7779 if (CDT.isNull()) { 7780 for (unsigned I = 0, E = FD->getNumParams(); I < E; ++I) { 7781 if (ParamAttrs[I + Offset].Kind == Vector) { 7782 CDT = FD->getParamDecl(I)->getType(); 7783 break; 7784 } 7785 } 7786 } 7787 } 7788 if (CDT.isNull()) 7789 CDT = C.IntTy; 7790 CDT = CDT->getCanonicalTypeUnqualified(); 7791 if (CDT->isRecordType() || CDT->isUnionType()) 7792 CDT = C.IntTy; 7793 return C.getTypeSize(CDT); 7794 } 7795 7796 static void 7797 emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn, 7798 const llvm::APSInt &VLENVal, 7799 ArrayRef<ParamAttrTy> ParamAttrs, 7800 OMPDeclareSimdDeclAttr::BranchStateTy State) { 7801 struct ISADataTy { 7802 char ISA; 7803 unsigned VecRegSize; 7804 }; 7805 ISADataTy 
ISAData[] = { 7806 { 7807 'b', 128 7808 }, // SSE 7809 { 7810 'c', 256 7811 }, // AVX 7812 { 7813 'd', 256 7814 }, // AVX2 7815 { 7816 'e', 512 7817 }, // AVX512 7818 }; 7819 llvm::SmallVector<char, 2> Masked; 7820 switch (State) { 7821 case OMPDeclareSimdDeclAttr::BS_Undefined: 7822 Masked.push_back('N'); 7823 Masked.push_back('M'); 7824 break; 7825 case OMPDeclareSimdDeclAttr::BS_Notinbranch: 7826 Masked.push_back('N'); 7827 break; 7828 case OMPDeclareSimdDeclAttr::BS_Inbranch: 7829 Masked.push_back('M'); 7830 break; 7831 } 7832 for (auto Mask : Masked) { 7833 for (auto &Data : ISAData) { 7834 SmallString<256> Buffer; 7835 llvm::raw_svector_ostream Out(Buffer); 7836 Out << "_ZGV" << Data.ISA << Mask; 7837 if (!VLENVal) { 7838 Out << llvm::APSInt::getUnsigned(Data.VecRegSize / 7839 evaluateCDTSize(FD, ParamAttrs)); 7840 } else 7841 Out << VLENVal; 7842 for (auto &ParamAttr : ParamAttrs) { 7843 switch (ParamAttr.Kind){ 7844 case LinearWithVarStride: 7845 Out << 's' << ParamAttr.StrideOrArg; 7846 break; 7847 case Linear: 7848 Out << 'l'; 7849 if (!!ParamAttr.StrideOrArg) 7850 Out << ParamAttr.StrideOrArg; 7851 break; 7852 case Uniform: 7853 Out << 'u'; 7854 break; 7855 case Vector: 7856 Out << 'v'; 7857 break; 7858 } 7859 if (!!ParamAttr.Alignment) 7860 Out << 'a' << ParamAttr.Alignment; 7861 } 7862 Out << '_' << Fn->getName(); 7863 Fn->addFnAttr(Out.str()); 7864 } 7865 } 7866 } 7867 7868 void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD, 7869 llvm::Function *Fn) { 7870 ASTContext &C = CGM.getContext(); 7871 FD = FD->getCanonicalDecl(); 7872 // Map params to their positions in function decl. 
7873 llvm::DenseMap<const Decl *, unsigned> ParamPositions; 7874 if (isa<CXXMethodDecl>(FD)) 7875 ParamPositions.insert({FD, 0}); 7876 unsigned ParamPos = ParamPositions.size(); 7877 for (auto *P : FD->parameters()) { 7878 ParamPositions.insert({P->getCanonicalDecl(), ParamPos}); 7879 ++ParamPos; 7880 } 7881 for (auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) { 7882 llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size()); 7883 // Mark uniform parameters. 7884 for (auto *E : Attr->uniforms()) { 7885 E = E->IgnoreParenImpCasts(); 7886 unsigned Pos; 7887 if (isa<CXXThisExpr>(E)) 7888 Pos = ParamPositions[FD]; 7889 else { 7890 auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl()) 7891 ->getCanonicalDecl(); 7892 Pos = ParamPositions[PVD]; 7893 } 7894 ParamAttrs[Pos].Kind = Uniform; 7895 } 7896 // Get alignment info. 7897 auto NI = Attr->alignments_begin(); 7898 for (auto *E : Attr->aligneds()) { 7899 E = E->IgnoreParenImpCasts(); 7900 unsigned Pos; 7901 QualType ParmTy; 7902 if (isa<CXXThisExpr>(E)) { 7903 Pos = ParamPositions[FD]; 7904 ParmTy = E->getType(); 7905 } else { 7906 auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl()) 7907 ->getCanonicalDecl(); 7908 Pos = ParamPositions[PVD]; 7909 ParmTy = PVD->getType(); 7910 } 7911 ParamAttrs[Pos].Alignment = 7912 (*NI) ? (*NI)->EvaluateKnownConstInt(C) 7913 : llvm::APSInt::getUnsigned( 7914 C.toCharUnitsFromBits(C.getOpenMPDefaultSimdAlign(ParmTy)) 7915 .getQuantity()); 7916 ++NI; 7917 } 7918 // Mark linear parameters. 
7919 auto SI = Attr->steps_begin(); 7920 auto MI = Attr->modifiers_begin(); 7921 for (auto *E : Attr->linears()) { 7922 E = E->IgnoreParenImpCasts(); 7923 unsigned Pos; 7924 if (isa<CXXThisExpr>(E)) 7925 Pos = ParamPositions[FD]; 7926 else { 7927 auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl()) 7928 ->getCanonicalDecl(); 7929 Pos = ParamPositions[PVD]; 7930 } 7931 auto &ParamAttr = ParamAttrs[Pos]; 7932 ParamAttr.Kind = Linear; 7933 if (*SI) { 7934 if (!(*SI)->EvaluateAsInt(ParamAttr.StrideOrArg, C, 7935 Expr::SE_AllowSideEffects)) { 7936 if (auto *DRE = cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) { 7937 if (auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) { 7938 ParamAttr.Kind = LinearWithVarStride; 7939 ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned( 7940 ParamPositions[StridePVD->getCanonicalDecl()]); 7941 } 7942 } 7943 } 7944 } 7945 ++SI; 7946 ++MI; 7947 } 7948 llvm::APSInt VLENVal; 7949 if (const Expr *VLEN = Attr->getSimdlen()) 7950 VLENVal = VLEN->EvaluateKnownConstInt(C); 7951 OMPDeclareSimdDeclAttr::BranchStateTy State = Attr->getBranchState(); 7952 if (CGM.getTriple().getArch() == llvm::Triple::x86 || 7953 CGM.getTriple().getArch() == llvm::Triple::x86_64) 7954 emitX86DeclareSimdFunction(FD, Fn, VLENVal, ParamAttrs, State); 7955 } 7956 } 7957 7958 namespace { 7959 /// Cleanup action for doacross support. 
/// \brief Cleanup that emits the __kmpc_doacross_fini runtime call when the
/// scope owning a doacross loop is left, on both the normal and the
/// exceptional (EH) exit paths.
class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
public:
  // Number of arguments taken by __kmpc_doacross_fini (loc, gtid).
  static const int DoacrossFinArgs = 2;

private:
  llvm::Value *RTLFn;                  // Runtime function to invoke on exit.
  llvm::Value *Args[DoacrossFinArgs];  // Pre-built call arguments.

public:
  DoacrossCleanupTy(llvm::Value *RTLFn, ArrayRef<llvm::Value *> CallArgs)
      : RTLFn(RTLFn) {
    assert(CallArgs.size() == DoacrossFinArgs);
    std::copy(CallArgs.begin(), CallArgs.end(), std::begin(Args));
  }
  void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
    // The builder may have no insertion point on an unreachable path.
    if (!CGF.HaveInsertPoint())
      return;
    CGF.EmitRuntimeCall(RTLFn, Args);
  }
};
} // namespace

/// \brief Emit initialization for a doacross loop: builds a kmp_dim descriptor
/// for the loop bounds, calls __kmpc_doacross_init, and pushes a cleanup that
/// calls __kmpc_doacross_fini when the region is exited.
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D) {
  if (!CGF.HaveInsertPoint())
    return;

  ASTContext &C = CGM.getContext();
  QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/true);
  RecordDecl *RD;
  // KmpDimTy caches the implicit kmp_dim record type across calls; build it
  // only on first use.
  if (KmpDimTy.isNull()) {
    // Build struct kmp_dim { // loop bounds info casted to kmp_int64
    //  kmp_int64 lo; // lower
    //  kmp_int64 up; // upper
    //  kmp_int64 st; // stride
    // };
    RD = C.buildImplicitRecord("kmp_dim");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    addFieldToRecordDecl(C, RD, Int64Ty);
    RD->completeDefinition();
    KmpDimTy = C.getRecordType(RD);
  } else
    RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());

  // Zero-initialized temporary; only 'up' and 'st' are written below, so
  // 'lo' stays 0.
  Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
  CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
  enum { LowerFD = 0, UpperFD, StrideFD };
  // Fill dims with data.
  LValue DimsLVal = CGF.MakeAddrLValue(DimsAddr, KmpDimTy);
  // dims.upper = num_iterations;
  LValue UpperLVal =
      CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), UpperFD));
  llvm::Value *NumIterVal = CGF.EmitScalarConversion(
      CGF.EmitScalarExpr(D.getNumIterations()), D.getNumIterations()->getType(),
      Int64Ty, D.getNumIterations()->getExprLoc());
  CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
  // dims.stride = 1;
  LValue StrideLVal =
      CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), StrideFD));
  CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
                        StrideLVal);

  // Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
  // kmp_int32 num_dims, struct kmp_dim * dims);
  // Note: num_dims is hard-coded to 1 — a single dims descriptor is emitted.
  llvm::Value *Args[] = {emitUpdateLocation(CGF, D.getLocStart()),
                         getThreadID(CGF, D.getLocStart()),
                         llvm::ConstantInt::getSigned(CGM.Int32Ty, 1),
                         CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
                             DimsAddr.getPointer(), CGM.VoidPtrTy)};

  llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
  CGF.EmitRuntimeCall(RTLFn, Args);
  // Register the matching __kmpc_doacross_fini call as a cleanup so it runs
  // on normal and EH exits from the region.
  llvm::Value *FiniArgs[DoacrossCleanupTy::DoacrossFinArgs] = {
      emitUpdateLocation(CGF, D.getLocEnd()), getThreadID(CGF, D.getLocEnd())};
  llvm::Value *FiniRTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_fini);
  CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
                                             llvm::makeArrayRef(FiniArgs));
}

/// \brief Emit a doacross 'ordered depend(source)'/'depend(sink)' construct:
/// stores the (converted) dependence counter into a temporary and calls
/// __kmpc_doacross_post (source) or __kmpc_doacross_wait (sink).
void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                          const OMPDependClause *C) {
  QualType Int64Ty =
      CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
  const Expr *CounterVal = C->getCounterValue();
  assert(CounterVal);
  // The runtime expects the counter as kmp_int64, so convert the expression.
  llvm::Value *CntVal = CGF.EmitScalarConversion(CGF.EmitScalarExpr(CounterVal),
                                                 CounterVal->getType(), Int64Ty,
                                                 CounterVal->getExprLoc());
  Address CntAddr = CGF.CreateMemTemp(Int64Ty, ".cnt.addr");
  CGF.EmitStoreOfScalar(CntVal, CntAddr, /*Volatile=*/false, Int64Ty);
  llvm::Value *Args[] = {emitUpdateLocation(CGF, C->getLocStart()),
                         getThreadID(CGF, C->getLocStart()),
                         CntAddr.getPointer()};
  llvm::Value *RTLFn;
  if (C->getDependencyKind() == OMPC_DEPEND_source)
    RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
  else {
    // Only 'source' and 'sink' dependence kinds reach this function.
    assert(C->getDependencyKind() == OMPC_DEPEND_sink);
    RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_wait);
  }
  CGF.EmitRuntimeCall(RTLFn, Args);
}

/// \brief Emit a call to \p Callee with an artificial debug location,
/// using the nounwind call form when the callee is a known non-throwing
/// function.
void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, llvm::Value *Callee,
                               ArrayRef<llvm::Value *> Args,
                               SourceLocation Loc) const {
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);

  if (auto *Fn = dyn_cast<llvm::Function>(Callee)) {
    if (Fn->doesNotThrow()) {
      CGF.EmitNounwindRuntimeCall(Fn, Args);
      return;
    }
  }
  CGF.EmitRuntimeCall(Callee, Args);
}

/// \brief Emit a call to an outlined OpenMP function; requires a valid
/// source location and delegates to emitCall.
void CGOpenMPRuntime::emitOutlinedFunctionCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> Args) const {
  assert(Loc.isValid() && "Outlined function call location must be valid.");
  emitCall(CGF, OutlinedFn, Args, Loc);
}

/// \brief Default implementation: the native parameter's address is used
/// directly (no target-specific translation; TargetParam is ignored here).
Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
                                             const VarDecl *NativeParam,
                                             const VarDecl *TargetParam) const {
  return CGF.GetAddrOfLocalVar(NativeParam);
}

// CGOpenMPSIMDRuntime overrides below: in SIMD-only mode no OpenMP runtime
// calls are generated, so every entry point that would need the runtime is
// unreachable; reaching one indicates a codegen bug.

llvm::Value *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitTaskOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    const VarDecl *PartIDVar, const VarDecl *TaskTVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
    bool Tied, unsigned &NumberOfParts) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
                                           SourceLocation Loc,
                                           llvm::Value *OutlinedFn,
                                           ArrayRef<llvm::Value *> CapturedVars,
                                           const Expr *IfCond) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCriticalRegion(
    CodeGenFunction &CGF, StringRef CriticalName,
    const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc,
    const Expr *Hint) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
                                           const RegionCodeGenTy &MasterOpGen,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
                                            SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskgroupRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen,
    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitSingleRegion(
    CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen,
    SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars,
    ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs,
    ArrayRef<const Expr *> AssignmentOps) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitOrderedRegion(CodeGenFunction &CGF,
                                            const RegionCodeGenTy &OrderedOpGen,
                                            SourceLocation Loc,
                                            bool IsThreads) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitBarrierCall(CodeGenFunction &CGF,
                                          SourceLocation Loc,
                                          OpenMPDirectiveKind Kind,
                                          bool EmitChecks,
                                          bool ForceSimpleCall) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForDispatchInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
    bool Ordered, const DispatchRTInput &DispatchValues) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind,
    const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDistributeStaticInit(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
                                                     SourceLocation Loc,
                                                     unsigned IVSize,
                                                     bool IVSigned) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitForStaticFinish(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              OpenMPDirectiveKind DKind) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Value *CGOpenMPSIMDRuntime::emitForNext(CodeGenFunction &CGF,
                                              SourceLocation Loc,
                                              unsigned IVSize, bool IVSigned,
                                              Address IL, Address LB,
                                              Address UB, Address ST) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitNumThreadsClause(CodeGenFunction &CGF,
                                               llvm::Value *NumThreads,
                                               SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitProcBindClause(CodeGenFunction &CGF,
                                             OpenMPProcBindClauseKind ProcBind,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
                                                    const VarDecl *VD,
                                                    Address VDAddr,
                                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

llvm::Function *CGOpenMPSIMDRuntime::emitThreadPrivateVarDefinition(
    const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit,
    CodeGenFunction *CGF) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getAddrOfArtificialThreadPrivate(
    CodeGenFunction &CGF, QualType VarType, StringRef Name) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitFlush(CodeGenFunction &CGF,
                                    ArrayRef<const Expr *> Vars,
                                    SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                                       const OMPExecutableDirective &D,
                                       llvm::Value *TaskFunction,
                                       QualType SharedsTy, Address Shareds,
                                       const Expr *IfCond,
                                       const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskLoopCall(
    CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D,
    llvm::Value *TaskFunction, QualType SharedsTy, Address Shareds,
    const Expr *IfCond, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

/// \brief In SIMD-only mode only simple (non-runtime) reductions are
/// expected; delegate those to the base-class implementation.
void CGOpenMPSIMDRuntime::emitReduction(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
    ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
    ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
  assert(Options.SimpleReduction && "Only simple reduction is expected.");
  CGOpenMPRuntime::emitReduction(CGF, Loc, Privates, LHSExprs, RHSExprs,
                                 ReductionOps, Options);
}

llvm::Value *CGOpenMPSIMDRuntime::emitTaskReductionInit(
    CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs,
    ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskReductionFixups(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  ReductionCodeGen &RCG,
                                                  unsigned N) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
                                                  SourceLocation Loc,
                                                  llvm::Value *ReductionsPtr,
                                                  LValue SharedLVal) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
                                           SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancellationPointCall(
    CodeGenFunction &CGF, SourceLocation Loc,
    OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitCancelCall(CodeGenFunction &CGF,
                                         SourceLocation Loc, const Expr *IfCond,
                                         OpenMPDirectiveKind CancelRegion) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         llvm::Value *OutlinedFn,
                                         llvm::Value *OutlinedFnID,
                                         const Expr *IfCond, const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetFunctions(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

bool CGOpenMPSIMDRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

/// \brief No target-specific handling in SIMD-only mode: report the global
/// as not handled so default codegen applies.
bool CGOpenMPSIMDRuntime::emitTargetGlobal(GlobalDecl GD) {
  return false;
}

/// \brief No offload registration is needed in SIMD-only mode.
llvm::Function *CGOpenMPSIMDRuntime::emitRegistrationFunction() {
  return nullptr;
}

void CGOpenMPSIMDRuntime::emitTeamsCall(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &D,
                                        SourceLocation Loc,
                                        llvm::Value *OutlinedFn,
                                        ArrayRef<llvm::Value *> CapturedVars) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
                                             const Expr *NumTeams,
                                             const Expr *ThreadLimit,
                                             SourceLocation Loc) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataCalls(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
    CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
    const Expr *Device) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
                                           const OMPLoopDirective &D) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

// SIMD-only mode generates no OpenMP runtime calls; doacross ordering,
// parameter translation, and parameter-address queries must never be
// requested from this runtime, so each override is unreachable.

void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
                                              const OMPDependClause *C) {
  llvm_unreachable("Not supported in SIMD-only mode");
}

const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
                                        const VarDecl *NativeParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}

Address
CGOpenMPSIMDRuntime::getParameterAddress(CodeGenFunction &CGF,
                                         const VarDecl *NativeParam,
                                         const VarDecl *TargetParam) const {
  llvm_unreachable("Not supported in SIMD-only mode");
}
