//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//

#include "CodeGenSchedule.h"
#include "CodeGenInstruction.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "subtarget-emitter"

#ifndef NDEBUG
// Debug-only helper: print a comma-separated list of indices to dbgs().
static void dumpIdxVec(ArrayRef<unsigned> V) {
  for (unsigned Idx : V)
    dbgs() << Idx << ", ";
}
#endif

namespace {

// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
struct InstrsOp : public SetTheory::Operator {
  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
  }
};

// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
struct InstRegexOp : public SetTheory::Operator {
  const CodeGenTarget &Target;
  InstRegexOp(const CodeGenTarget &t): Target(t) {}

  /// Remove any text inside of parentheses from S.
  /// Used below to detect top-level '|' / '?' regex metacharacters while
  /// ignoring ones nested inside groups.
  static std::string removeParens(llvm::StringRef S) {
    std::string Result;
    unsigned Paren = 0;
    // NB: We don't care about escaped parens here.
    for (char C : S) {
      switch (C) {
      case '(':
        ++Paren;
        break;
      case ')':
        --Paren;
        break;
      default:
        // Keep only characters that are outside every parenthesized group.
        if (Paren == 0)
          Result += C;
      }
    }
    return Result;
  }

  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ArrayRef<const CodeGenInstruction *> Instructions =
        Target.getInstructionsByEnumValue();

    // The enum-ordered instruction list is laid out as:
    //   [generic opcodes][pseudo instructions][non-pseudo instructions].
    unsigned NumGeneric = Target.getNumFixedInstructions();
    unsigned NumPseudos = Target.getNumPseudoInstructions();
    auto Generics = Instructions.slice(0, NumGeneric);
    auto Pseudos = Instructions.slice(NumGeneric, NumPseudos);
    auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos);

    for (Init *Arg : make_range(Expr->arg_begin(), Expr->arg_end())) {
      StringInit *SI = dyn_cast<StringInit>(Arg);
      if (!SI)
        PrintFatalError(Loc, "instregex requires pattern string: " +
                        Expr->getAsString());
      StringRef Original = SI->getValue();

      // Extract a prefix that we can binary search on.
      static const char RegexMetachars[] = "()^$|*+?.[]\\{}";
      auto FirstMeta = Original.find_first_of(RegexMetachars);

      // Look for top-level | or ?. We cannot optimize them to binary search.
      if (removeParens(Original).find_first_of("|?") != std::string::npos)
        FirstMeta = 0;

      // Prefix is matched literally (binary search); the remainder, if any,
      // is matched with a python-style anchored regex.
      Optional<Regex> Regexpr = None;
      StringRef Prefix = Original.substr(0, FirstMeta);
      StringRef PatStr = Original.substr(FirstMeta);
      if (!PatStr.empty()) {
        // For the rest use a python-style prefix match.
        std::string pat = PatStr;
        if (pat[0] != '^') {
          pat.insert(0, "^(");
          pat.insert(pat.end(), ')');
        }
        Regexpr = Regex(pat);
      }

      int NumMatches = 0;

      // The generic opcodes are unsorted, handle them manually.
      for (auto *Inst : Generics) {
        StringRef InstName = Inst->TheDef->getName();
        if (InstName.startswith(Prefix) &&
            (!Regexpr || Regexpr->match(InstName.substr(Prefix.size())))) {
          Elts.insert(Inst->TheDef);
          NumMatches++;
        }
      }

      // Target instructions are split into two ranges: pseudo instructions
      // first, then non-pseudos. Each range is in lexicographical order
      // sorted by name. Find the sub-ranges that start with our prefix.
      struct Comp {
        bool operator()(const CodeGenInstruction *LHS, StringRef RHS) {
          return LHS->TheDef->getName() < RHS;
        }
        bool operator()(StringRef LHS, const CodeGenInstruction *RHS) {
          return LHS < RHS->TheDef->getName() &&
                 !RHS->TheDef->getName().startswith(LHS);
        }
      };
      auto Range1 =
          std::equal_range(Pseudos.begin(), Pseudos.end(), Prefix, Comp());
      auto Range2 = std::equal_range(NonPseudos.begin(), NonPseudos.end(),
                                     Prefix, Comp());

      // For these ranges we know that instruction names start with the prefix.
      // Check if there's a regex that needs to be checked.
      const auto HandleNonGeneric = [&](const CodeGenInstruction *Inst) {
        StringRef InstName = Inst->TheDef->getName();
        if (!Regexpr || Regexpr->match(InstName.substr(Prefix.size()))) {
          Elts.insert(Inst->TheDef);
          NumMatches++;
        }
      };
      std::for_each(Range1.first, Range1.second, HandleNonGeneric);
      std::for_each(Range2.first, Range2.second, HandleNonGeneric);

      // A pattern that matches nothing is almost certainly a stale or
      // misspelled instregex; fail loudly.
      if (0 == NumMatches)
        PrintFatalError(Loc, "instregex has no matches: " + Original);
    }
  }
};

} // end anonymous namespace

/// CodeGenModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
                                       const CodeGenTarget &TGT):
  Records(RK), Target(TGT) {

  Sets.addFieldExpander("InstRW", "Instrs");

  // Allow Set evaluation to recognize the dags used in InstRW records:
  // (instrs Op1, Op1...)
  Sets.addOperator("instrs", llvm::make_unique<InstrsOp>());
  Sets.addOperator("instregex", llvm::make_unique<InstRegexOp>(Target));

  // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
  // that are explicitly referenced in tablegen records. Resources associated
  // with each processor will be derived later. Populate ProcModelMap with the
  // CodeGenProcModel instances.
  collectProcModels();

  // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
  // defined, and populate SchedReads and SchedWrites vectors. Implicit
  // SchedReadWrites that represent sequences derived from expanded variant will
  // be inferred later.
  collectSchedRW();

  // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
  // required by an instruction definition, and populate SchedClassIdxMap. Set
  // NumItineraryClasses to the number of explicit itinerary classes referenced
  // by instructions. Set NumInstrSchedClasses to the number of itinerary
  // classes plus any classes implied by instructions that derive from class
  // Sched and provide SchedRW list. This does not infer any new classes from
  // SchedVariant.
  collectSchedClasses();

  // Find instruction itineraries for each processor. Sort and populate
  // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
  // all itinerary classes to be discovered.
  collectProcItins();

  // Find ItinRW records for each processor and itinerary class.
  // (For per-operand resources mapped to itinerary classes).
  collectProcItinRW();

  // Find UnsupportedFeatures records for each processor.
  collectProcUnsupportedFeatures();

  // Infer new SchedClasses from SchedVariant.
  inferSchedClasses();

  // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
  // ProcResourceDefs.
  LLVM_DEBUG(
      dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
  collectProcResources();

  // Collect optional processor description.
  collectOptionalProcessorInfo();

  // Check MCInstPredicate definitions.
  checkMCInstPredicates();

  // Check STIPredicate definitions.
  checkSTIPredicates();

  // Find STIPredicate definitions for each processor model, and construct
  // STIPredicateFunction objects.
  collectSTIPredicates();

  // NOTE(review): checkCompleteness() is also invoked from within
  // collectOptionalProcessorInfo(), so the check runs twice — confirm the
  // duplicate invocation is intentional.
  checkCompleteness();
}

// Validate STIPredicate-related records: duplicate declarations and empty
// equivalence classes are hard errors.
void CodeGenSchedModels::checkSTIPredicates() const {
  DenseMap<StringRef, const Record *> Declarations;

  // There cannot be multiple declarations with the same name.
  const RecVec Decls = Records.getAllDerivedDefinitions("STIPredicateDecl");
  for (const Record *R : Decls) {
    StringRef Name = R->getValueAsString("Name");
    const auto It = Declarations.find(Name);
    if (It == Declarations.end()) {
      Declarations[Name] = R;
      continue;
    }

    // Duplicate: report both locations, then abort.
    PrintError(R->getLoc(), "STIPredicate " + Name + " multiply declared.");
    PrintNote(It->second->getLoc(), "Previous declaration was here.");
    PrintFatalError(R->getLoc(), "Invalid STIPredicateDecl found.");
  }

  // Disallow InstructionEquivalenceClasses with an empty instruction list.
  const RecVec Defs =
      Records.getAllDerivedDefinitions("InstructionEquivalenceClass");
  for (const Record *R : Defs) {
    RecVec Opcodes = R->getValueAsListOfDefs("Opcodes");
    if (Opcodes.empty()) {
      PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass "
                                   "defined with an empty opcode list.");
    }
  }
}

// Used by function `processSTIPredicate` to construct a mask of machine
// instruction operands.
// Build a bitmask with one bit set per operand index in `Indices`.
// An empty index list yields a zero-width mask.
static APInt constructOperandMask(ArrayRef<int64_t> Indices) {
  APInt OperandMask;
  if (Indices.empty())
    return OperandMask;

  // Size the mask to hold the largest index, then set each requested bit.
  int64_t MaxIndex = *std::max_element(Indices.begin(), Indices.end());
  assert(MaxIndex >= 0 && "Invalid negative indices in input!");
  OperandMask = OperandMask.zext(MaxIndex + 1);
  for (const int64_t Index : Indices) {
    assert(Index >= 0 && "Invalid negative indices!");
    OperandMask.setBit(Index);
  }

  return OperandMask;
}

// Flatten the InstructionEquivalenceClass definitions of STIPredicateFunction
// `Fn` into per-opcode OpcodeInfo entries, then feed the opcodes back to `Fn`
// sorted by processor/predicate masks.
static void
processSTIPredicate(STIPredicateFunction &Fn,
                    const DenseMap<Record *, unsigned> &ProcModelMap) {
  DenseMap<const Record *, unsigned> Opcode2Index;
  using OpcodeMapPair = std::pair<const Record *, OpcodeInfo>;
  std::vector<OpcodeMapPair> OpcodeMappings;
  std::vector<std::pair<APInt, APInt>> OpcodeMasks;

  DenseMap<const Record *, unsigned> Predicate2Index;
  unsigned NumUniquePredicates = 0;

  // Number unique predicates and opcodes used by InstructionEquivalenceClass
  // definitions. Each unique opcode will be associated with an OpcodeInfo
  // object.
  for (const Record *Def : Fn.getDefinitions()) {
    RecVec Classes = Def->getValueAsListOfDefs("Classes");
    for (const Record *EC : Classes) {
      const Record *Pred = EC->getValueAsDef("Predicate");
      if (Predicate2Index.find(Pred) == Predicate2Index.end())
        Predicate2Index[Pred] = NumUniquePredicates++;

      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
      for (const Record *Opcode : Opcodes) {
        if (Opcode2Index.find(Opcode) == Opcode2Index.end()) {
          Opcode2Index[Opcode] = OpcodeMappings.size();
          OpcodeMappings.emplace_back(Opcode, OpcodeInfo());
        }
      }
    }
  }

  // Initialize vector `OpcodeMasks` with default values. We want to keep track
  // of which processors "use" which opcodes. We also want to be able to
  // identify predicates that are used by different processors for a same
  // opcode.
  // This information is used later on by this algorithm to sort OpcodeMapping
  // elements based on their processor and predicate sets.
  OpcodeMasks.resize(OpcodeMappings.size());
  APInt DefaultProcMask(ProcModelMap.size(), 0);
  APInt DefaultPredMask(NumUniquePredicates, 0);
  for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks)
    MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask);

  // Construct a OpcodeInfo object for every unique opcode declared by an
  // InstructionEquivalenceClass definition.
  for (const Record *Def : Fn.getDefinitions()) {
    RecVec Classes = Def->getValueAsListOfDefs("Classes");
    const Record *SchedModel = Def->getValueAsDef("SchedModel");
    unsigned ProcIndex = ProcModelMap.find(SchedModel)->second;
    APInt ProcMask(ProcModelMap.size(), 0);
    ProcMask.setBit(ProcIndex);

    for (const Record *EC : Classes) {
      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");

      std::vector<int64_t> OpIndices =
          EC->getValueAsListOfInts("OperandIndices");
      APInt OperandMask = constructOperandMask(OpIndices);

      const Record *Pred = EC->getValueAsDef("Predicate");
      APInt PredMask(NumUniquePredicates, 0);
      PredMask.setBit(Predicate2Index[Pred]);

      for (const Record *Opcode : Opcodes) {
        unsigned OpcodeIdx = Opcode2Index[Opcode];
        // An opcode may appear in at most one equivalence class per processor.
        if (OpcodeMasks[OpcodeIdx].first[ProcIndex]) {
          std::string Message =
              "Opcode " + Opcode->getName().str() +
              " used by multiple InstructionEquivalenceClass definitions.";
          PrintFatalError(EC->getLoc(), Message);
        }
        OpcodeMasks[OpcodeIdx].first |= ProcMask;
        OpcodeMasks[OpcodeIdx].second |= PredMask;
        OpcodeInfo &OI = OpcodeMappings[OpcodeIdx].second;

        OI.addPredicateForProcModel(ProcMask, OperandMask, Pred);
      }
    }
  }

  // Sort OpcodeMappings elements based on their CPU and predicate masks.
  // As a last resort, order elements by opcode identifier.
  llvm::sort(OpcodeMappings,
             [&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) {
               unsigned LhsIdx = Opcode2Index[Lhs.first];
               unsigned RhsIdx = Opcode2Index[Rhs.first];
               std::pair<APInt, APInt> &LhsMasks = OpcodeMasks[LhsIdx];
               std::pair<APInt, APInt> &RhsMasks = OpcodeMasks[RhsIdx];

               // Primary key: processor mask (popcount, then bit position).
               if (LhsMasks.first != RhsMasks.first) {
                 if (LhsMasks.first.countPopulation() <
                     RhsMasks.first.countPopulation())
                   return true;
                 return LhsMasks.first.countLeadingZeros() >
                        RhsMasks.first.countLeadingZeros();
               }

               // Secondary key: predicate mask.
               if (LhsMasks.second != RhsMasks.second) {
                 if (LhsMasks.second.countPopulation() <
                     RhsMasks.second.countPopulation())
                   return true;
                 return LhsMasks.second.countLeadingZeros() >
                        RhsMasks.second.countLeadingZeros();
               }

               return LhsIdx < RhsIdx;
             });

  // Now construct opcode groups. Groups are used by the SubtargetEmitter when
  // expanding the body of a STIPredicate function. In particular, each opcode
  // group is expanded into a sequence of labels in a switch statement.
  // It identifies opcodes for which different processors define same predicates
  // and same opcode masks.
  for (OpcodeMapPair &Info : OpcodeMappings)
    Fn.addOpcode(Info.first, std::move(Info.second));
}

void CodeGenSchedModels::collectSTIPredicates() {
  // Map STIPredicateDecl records to elements of vector
  // CodeGenSchedModels::STIPredicates.
405 DenseMap<const Record *, unsigned> Decl2Index; 406 407 RecVec RV = Records.getAllDerivedDefinitions("STIPredicate"); 408 for (const Record *R : RV) { 409 const Record *Decl = R->getValueAsDef("Declaration"); 410 411 const auto It = Decl2Index.find(Decl); 412 if (It == Decl2Index.end()) { 413 Decl2Index[Decl] = STIPredicates.size(); 414 STIPredicateFunction Predicate(Decl); 415 Predicate.addDefinition(R); 416 STIPredicates.emplace_back(std::move(Predicate)); 417 continue; 418 } 419 420 STIPredicateFunction &PreviousDef = STIPredicates[It->second]; 421 PreviousDef.addDefinition(R); 422 } 423 424 for (STIPredicateFunction &Fn : STIPredicates) 425 processSTIPredicate(Fn, ProcModelMap); 426 } 427 428 void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask, 429 const llvm::APInt &OperandMask, 430 const Record *Predicate) { 431 auto It = llvm::find_if( 432 Predicates, [&OperandMask, &Predicate](const PredicateInfo &P) { 433 return P.Predicate == Predicate && P.OperandMask == OperandMask; 434 }); 435 if (It == Predicates.end()) { 436 Predicates.emplace_back(CpuMask, OperandMask, Predicate); 437 return; 438 } 439 It->ProcModelMask |= CpuMask; 440 } 441 442 void CodeGenSchedModels::checkMCInstPredicates() const { 443 RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate"); 444 if (MCPredicates.empty()) 445 return; 446 447 // A target cannot have multiple TIIPredicate definitions with a same name. 
448 llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size()); 449 for (const Record *TIIPred : MCPredicates) { 450 StringRef Name = TIIPred->getValueAsString("FunctionName"); 451 StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name); 452 if (It == TIIPredicates.end()) { 453 TIIPredicates[Name] = TIIPred; 454 continue; 455 } 456 457 PrintError(TIIPred->getLoc(), 458 "TIIPredicate " + Name + " is multiply defined."); 459 PrintNote(It->second->getLoc(), 460 " Previous definition of " + Name + " was here."); 461 PrintFatalError(TIIPred->getLoc(), 462 "Found conflicting definitions of TIIPredicate."); 463 } 464 } 465 466 void CodeGenSchedModels::collectRetireControlUnits() { 467 RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit"); 468 469 for (Record *RCU : Units) { 470 CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel")); 471 if (PM.RetireControlUnit) { 472 PrintError(RCU->getLoc(), 473 "Expected a single RetireControlUnit definition"); 474 PrintNote(PM.RetireControlUnit->getLoc(), 475 "Previous definition of RetireControlUnit was here"); 476 } 477 PM.RetireControlUnit = RCU; 478 } 479 } 480 481 void CodeGenSchedModels::collectLoadStoreQueueInfo() { 482 RecVec Queues = Records.getAllDerivedDefinitions("MemoryQueue"); 483 484 for (Record *Queue : Queues) { 485 CodeGenProcModel &PM = getProcModel(Queue->getValueAsDef("SchedModel")); 486 if (Queue->isSubClassOf("LoadQueue")) { 487 if (PM.LoadQueue) { 488 PrintError(Queue->getLoc(), 489 "Expected a single LoadQueue definition"); 490 PrintNote(PM.LoadQueue->getLoc(), 491 "Previous definition of LoadQueue was here"); 492 } 493 494 PM.LoadQueue = Queue; 495 } 496 497 if (Queue->isSubClassOf("StoreQueue")) { 498 if (PM.StoreQueue) { 499 PrintError(Queue->getLoc(), 500 "Expected a single StoreQueue definition"); 501 PrintNote(PM.LoadQueue->getLoc(), 502 "Previous definition of StoreQueue was here"); 503 } 504 505 PM.StoreQueue = Queue; 506 } 507 } 508 } 509 510 /// 
void CodeGenSchedModels::collectOptionalProcessorInfo() {
  // Find register file definitions for each processor.
  collectRegisterFiles();

  // Collect processor RetireControlUnit descriptors if available.
  collectRetireControlUnits();

  // Collect information about load/store queues.
  collectLoadStoreQueueInfo();

  // NOTE(review): the constructor also calls checkCompleteness() after this
  // function returns, so the completeness check runs twice — confirm the
  // duplicate invocation is intentional.
  checkCompleteness();
}

/// Gather all processor models.
void CodeGenSchedModels::collectProcModels() {
  RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcRecords, LessRecordFieldName());

  // Reserve space because we can. Reallocation would be ok.
  ProcModels.reserve(ProcRecords.size()+1);

  // Use idx=0 for NoModel/NoItineraries.
  Record *NoModelDef = Records.getDef("NoSchedModel");
  Record *NoItinsDef = Records.getDef("NoItineraries");
  ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
  ProcModelMap[NoModelDef] = 0;

  // For each processor, find a unique machine model.
  LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
  for (Record *ProcRecord : ProcRecords)
    addProcModel(ProcRecord);
}

/// Get a unique processor model based on the defined MachineModel and
/// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
  Record *ModelKey = getModelOrItinDef(ProcDef);
  // Already have a model for this key; nothing to do.
  if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
    return;

  std::string Name = ModelKey->getName();
  if (ModelKey->isSubClassOf("SchedMachineModel")) {
    Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
    ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
  }
  else {
    // An itinerary is defined without a machine model. Infer a new model.
    if (!ModelKey->getValueAsListOfDefs("IID").empty())
      Name = Name + "Model";
    ProcModels.emplace_back(ProcModels.size(), Name,
                            ProcDef->getValueAsDef("SchedModel"), ModelKey);
  }
  LLVM_DEBUG(ProcModels.back().dump());
}

// Recursively find all reachable SchedReadWrite records.
// RWSet is the visited set; each newly visited record is appended to RWDefs.
static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
                        SmallPtrSet<Record*, 16> &RWSet) {
  if (!RWSet.insert(RWDef).second)
    return;
  RWDefs.push_back(RWDef);
  // Reads don't currently have sequence records, but it can be added later.
  if (RWDef->isSubClassOf("WriteSequence")) {
    RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
    for (Record *WSRec : Seq)
      scanSchedRW(WSRec, RWDefs, RWSet);
  }
  else if (RWDef->isSubClassOf("SchedVariant")) {
    // Visit each variant (guarded by a different predicate).
    RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
    for (Record *Variant : Vars) {
      // Visit each RW in the sequence selected by the current variant.
      RecVec Selected = Variant->getValueAsListOfDefs("Selected");
      for (Record *SelDef : Selected)
        scanSchedRW(SelDef, RWDefs, RWSet);
    }
  }
}

// Collect and sort all SchedReadWrites reachable via tablegen records.
// More may be inferred later when inferring new SchedClasses from variants.
void CodeGenSchedModels::collectSchedRW() {
  // Reserve idx=0 for invalid writes/reads.
  SchedWrites.resize(1);
  SchedReads.resize(1);

  SmallPtrSet<Record*, 16> RWSet;

  // Find all SchedReadWrites referenced by instruction defs.
600 RecVec SWDefs, SRDefs; 601 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { 602 Record *SchedDef = Inst->TheDef; 603 if (SchedDef->isValueUnset("SchedRW")) 604 continue; 605 RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW"); 606 for (Record *RW : RWs) { 607 if (RW->isSubClassOf("SchedWrite")) 608 scanSchedRW(RW, SWDefs, RWSet); 609 else { 610 assert(RW->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 611 scanSchedRW(RW, SRDefs, RWSet); 612 } 613 } 614 } 615 // Find all ReadWrites referenced by InstRW. 616 RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW"); 617 for (Record *InstRWDef : InstRWDefs) { 618 // For all OperandReadWrites. 619 RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites"); 620 for (Record *RWDef : RWDefs) { 621 if (RWDef->isSubClassOf("SchedWrite")) 622 scanSchedRW(RWDef, SWDefs, RWSet); 623 else { 624 assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 625 scanSchedRW(RWDef, SRDefs, RWSet); 626 } 627 } 628 } 629 // Find all ReadWrites referenced by ItinRW. 630 RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW"); 631 for (Record *ItinRWDef : ItinRWDefs) { 632 // For all OperandReadWrites. 633 RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites"); 634 for (Record *RWDef : RWDefs) { 635 if (RWDef->isSubClassOf("SchedWrite")) 636 scanSchedRW(RWDef, SWDefs, RWSet); 637 else { 638 assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 639 scanSchedRW(RWDef, SRDefs, RWSet); 640 } 641 } 642 } 643 // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted 644 // for the loop below that initializes Alias vectors. 
645 RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias"); 646 llvm::sort(AliasDefs, LessRecord()); 647 for (Record *ADef : AliasDefs) { 648 Record *MatchDef = ADef->getValueAsDef("MatchRW"); 649 Record *AliasDef = ADef->getValueAsDef("AliasRW"); 650 if (MatchDef->isSubClassOf("SchedWrite")) { 651 if (!AliasDef->isSubClassOf("SchedWrite")) 652 PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite"); 653 scanSchedRW(AliasDef, SWDefs, RWSet); 654 } 655 else { 656 assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); 657 if (!AliasDef->isSubClassOf("SchedRead")) 658 PrintFatalError(ADef->getLoc(), "SchedRead Alias must be SchedRead"); 659 scanSchedRW(AliasDef, SRDefs, RWSet); 660 } 661 } 662 // Sort and add the SchedReadWrites directly referenced by instructions or 663 // itinerary resources. Index reads and writes in separate domains. 664 llvm::sort(SWDefs, LessRecord()); 665 for (Record *SWDef : SWDefs) { 666 assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite"); 667 SchedWrites.emplace_back(SchedWrites.size(), SWDef); 668 } 669 llvm::sort(SRDefs, LessRecord()); 670 for (Record *SRDef : SRDefs) { 671 assert(!getSchedRWIdx(SRDef, /*IsRead-*/true) && "duplicate SchedWrite"); 672 SchedReads.emplace_back(SchedReads.size(), SRDef); 673 } 674 // Initialize WriteSequence vectors. 675 for (CodeGenSchedRW &CGRW : SchedWrites) { 676 if (!CGRW.IsSequence) 677 continue; 678 findRWs(CGRW.TheDef->getValueAsListOfDefs("Writes"), CGRW.Sequence, 679 /*IsRead=*/false); 680 } 681 // Initialize Aliases vectors. 
682 for (Record *ADef : AliasDefs) { 683 Record *AliasDef = ADef->getValueAsDef("AliasRW"); 684 getSchedRW(AliasDef).IsAlias = true; 685 Record *MatchDef = ADef->getValueAsDef("MatchRW"); 686 CodeGenSchedRW &RW = getSchedRW(MatchDef); 687 if (RW.IsAlias) 688 PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias"); 689 RW.Aliases.push_back(ADef); 690 } 691 LLVM_DEBUG( 692 dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n"; 693 for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) { 694 dbgs() << WIdx << ": "; 695 SchedWrites[WIdx].dump(); 696 dbgs() << '\n'; 697 } for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; 698 ++RIdx) { 699 dbgs() << RIdx << ": "; 700 SchedReads[RIdx].dump(); 701 dbgs() << '\n'; 702 } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite"); 703 for (Record *RWDef 704 : RWDefs) { 705 if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) { 706 StringRef Name = RWDef->getName(); 707 if (Name != "NoWrite" && Name != "ReadDefault") 708 dbgs() << "Unused SchedReadWrite " << Name << '\n'; 709 } 710 }); 711 } 712 713 /// Compute a SchedWrite name from a sequence of writes. 714 std::string CodeGenSchedModels::genRWName(ArrayRef<unsigned> Seq, bool IsRead) { 715 std::string Name("("); 716 for (auto I = Seq.begin(), E = Seq.end(); I != E; ++I) { 717 if (I != Seq.begin()) 718 Name += '_'; 719 Name += getSchedRW(*I, IsRead).Name; 720 } 721 Name += ')'; 722 return Name; 723 } 724 725 unsigned CodeGenSchedModels::getSchedRWIdx(const Record *Def, 726 bool IsRead) const { 727 const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites; 728 const auto I = find_if( 729 RWVec, [Def](const CodeGenSchedRW &RW) { return RW.TheDef == Def; }); 730 return I == RWVec.end() ? 
      0 : std::distance(RWVec.begin(), I);
}

// Return true if any ProcReadAdvance-derived SchedRead lists WriteDef among
// its ValidWrites.
bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
  for (const CodeGenSchedRW &Read : SchedReads) {
    Record *ReadDef = Read.TheDef;
    if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
      continue;

    RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
    if (is_contained(ValidWrites, WriteDef)) {
      return true;
    }
  }
  return false;
}

// Partition a mixed SchedReadWrite list into its write and read records.
static void splitSchedReadWrites(const RecVec &RWDefs,
                                 RecVec &WriteDefs, RecVec &ReadDefs) {
  for (Record *RWDef : RWDefs) {
    if (RWDef->isSubClassOf("SchedWrite"))
      WriteDefs.push_back(RWDef);
    else {
      assert(RWDef->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
      ReadDefs.push_back(RWDef);
    }
  }
}

// Split the SchedReadWrites defs and call findRWs for each list.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
                                 IdxVec &Writes, IdxVec &Reads) const {
  RecVec WriteDefs;
  RecVec ReadDefs;
  splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
  findRWs(WriteDefs, Writes, false);
  findRWs(ReadDefs, Reads, true);
}

// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
                                 bool IsRead) const {
  for (Record *RWDef : RWDefs) {
    unsigned Idx = getSchedRWIdx(RWDef, IsRead);
    assert(Idx && "failed to collect SchedReadWrite");
    RWs.push_back(Idx);
  }
}

// Recursively expand a WriteSequence (honoring its Repeat count) into the
// flat list RWSeq; a non-sequence RW expands to itself.
void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
                                          bool IsRead) const {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (!SchedRW.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
      SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
  for (int i = 0; i < Repeat; ++i) {
    for (unsigned I : SchedRW.Sequence) {
      expandRWSequence(I, RWSeq, IsRead);
    }
  }
}

// Expand a SchedWrite as a sequence following any aliases that coincide with
// the given processor model.
void CodeGenSchedModels::expandRWSeqForProc(
    unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
    const CodeGenProcModel &ProcModel) const {

  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
  // Pick the alias (if any) whose SchedModel matches ProcModel; an alias with
  // no SchedModel applies to every processor.
  Record *AliasDef = nullptr;
  for (const Record *Rec : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW"));
    if (Rec->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = Rec->getValueAsDef("SchedModel");
      if (&getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      " Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef) {
    // Follow the alias instead of the matched RW.
    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
                       RWSeq, IsRead, ProcModel);
    return;
  }
  if (!SchedWrite.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
      SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
  for (int I = 0, E = Repeat; I < E; ++I) {
    for (unsigned Idx : SchedWrite.Sequence) {
      expandRWSeqForProc(Idx, RWSeq, IsRead, ProcModel);
    }
  }
}

// Find the existing SchedWrite that models this sequence of writes.
unsigned CodeGenSchedModels::findRWForSequence(ArrayRef<unsigned> Seq,
                                               bool IsRead) {
  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;

  auto I = find_if(RWVec, [Seq](CodeGenSchedRW &RW) {
    return makeArrayRef(RW.Sequence) == Seq;
  });
  // Index zero reserved for invalid RW.
  return I == RWVec.end() ?
      0 : std::distance(RWVec.begin(), I);
}

/// Add this ReadWrite if it doesn't already exist.
unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
                                            bool IsRead) {
  assert(!Seq.empty() && "cannot insert empty sequence");
  // A single-element sequence is just that element; no new record needed.
  if (Seq.size() == 1)
    return Seq.back();

  unsigned Idx = findRWForSequence(Seq, IsRead);
  if (Idx)
    return Idx;

  // Append a new synthesized sequence RW at the end of the domain.
  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  unsigned RWIdx = RWVec.size();
  CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
  RWVec.push_back(SchedRW);
  return RWIdx;
}

/// Visit all the instruction definitions for this target to gather and
/// enumerate the itinerary classes. These are the explicitly specified
/// SchedClasses. More SchedClasses may be inferred.
void CodeGenSchedModels::collectSchedClasses() {

  // NoItinerary is always the first class at Idx=0
  assert(SchedClasses.empty() && "Expected empty sched class");
  SchedClasses.emplace_back(0, "NoInstrModel",
                            Records.getDef("NoItinerary"));
  SchedClasses.back().ProcIndices.push_back(0);

  // Create a SchedClass for each unique combination of itinerary class and
  // SchedRW list.
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
    IdxVec Writes, Reads;
    if (!Inst->TheDef->isValueUnset("SchedRW"))
      findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);

    // ProcIdx == 0 indicates the class applies to all processors.
    unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, /*ProcIndices*/{0});
    InstrClassMap[Inst->TheDef] = SCIdx;
  }
  // Create classes for InstRW defs.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  llvm::sort(InstRWDefs, LessRecord());
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
  for (Record *RWDef : InstRWDefs)
    createInstRWClass(RWDef);

  NumInstrSchedClasses = SchedClasses.size();

  // Everything below is diagnostic output only; skip it in release builds.
  bool EnableDump = false;
  LLVM_DEBUG(EnableDump = true);
  if (!EnableDump)
    return;

  LLVM_DEBUG(
      dbgs()
      << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    StringRef InstName = Inst->TheDef->getName();
    unsigned SCIdx = getSchedClassIdx(*Inst);
    if (!SCIdx) {
      LLVM_DEBUG({
        if (!Inst->hasNoSchedulingInfo)
          dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
      });
      continue;
    }
    CodeGenSchedClass &SC = getSchedClass(SCIdx);
    if (SC.ProcIndices[0] != 0)
      PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
                      "must not be subtarget specific.");

    IdxVec ProcIndices;
    if (SC.ItinClassDef->getName() != "NoItinerary") {
      ProcIndices.push_back(0);
      dbgs() << "Itinerary for " << InstName << ": "
             << SC.ItinClassDef->getName() << '\n';
    }
    if (!SC.Writes.empty()) {
      ProcIndices.push_back(0);
      LLVM_DEBUG({
        dbgs() << "SchedRW machine model for " << InstName;
        for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE;
             ++WI)
          dbgs() << " " << SchedWrites[*WI].Name;
        for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
          dbgs() << " " << SchedReads[*RI].Name;
        dbgs() << '\n';
      });
    }
    const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
    for (Record *RWDef : RWDefs) {
      const CodeGenProcModel &ProcModel =
          getProcModel(RWDef->getValueAsDef("SchedModel"));
      ProcIndices.push_back(ProcModel.Index);
      LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
                        << InstName);
      IdxVec Writes;
      IdxVec Reads;
      findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
              Writes, Reads);
      LLVM_DEBUG({
        for (unsigned WIdx : Writes)
          dbgs() << " " << SchedWrites[WIdx].Name;
        for (unsigned RIdx : Reads)
          dbgs() << " " << SchedReads[RIdx].Name;
        dbgs() << '\n';
      });
    }
    // If ProcIndices contains zero, the class applies to all processors.
    LLVM_DEBUG({
      if (!std::count(ProcIndices.begin(), ProcIndices.end(), 0)) {
        for (const CodeGenProcModel &PM : ProcModels) {
          if (!std::count(ProcIndices.begin(), ProcIndices.end(), PM.Index))
            dbgs() << "No machine model for " << Inst->TheDef->getName()
                   << " on processor " << PM.ModelName << '\n';
        }
      }
    });
  }
}

// Get the SchedClass index for an instruction.
unsigned
CodeGenSchedModels::getSchedClassIdx(const CodeGenInstruction &Inst) const {
  return InstrClassMap.lookup(Inst.TheDef);
}

// Compose a class name from the itinerary class (if meaningful) and the names
// of the per-operand writes and reads, joined with '_'.
std::string
CodeGenSchedModels::createSchedClassName(Record *ItinClassDef,
                                         ArrayRef<unsigned> OperWrites,
                                         ArrayRef<unsigned> OperReads) {

  std::string Name;
  if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
    Name = ItinClassDef->getName();
  for (unsigned Idx : OperWrites) {
    if (!Name.empty())
      Name += '_';
    Name += SchedWrites[Idx].Name;
  }
  for (unsigned Idx : OperReads) {
    Name += '_';
    Name += SchedReads[Idx].Name;
  }
  return Name;
}

// Compose a class name by joining the given instruction names with '_'.
std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {

  std::string Name;
  for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
    if (I != InstDefs.begin())
      Name += '_';
    Name += (*I)->getName();
  }
  return Name;
}

/// Add an inferred sched class from an itinerary class and per-operand list of
/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
/// processors that may utilize this class.
unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
                                           ArrayRef<unsigned> OperWrites,
                                           ArrayRef<unsigned> OperReads,
                                           ArrayRef<unsigned> ProcIndices) {
  assert(!ProcIndices.empty() && "expect at least one ProcIdx");

  auto IsKeyEqual = [=](const CodeGenSchedClass &SC) {
    return SC.isKeyEqual(ItinClassDef, OperWrites, OperReads);
  };

  auto I = find_if(make_range(schedClassBegin(), schedClassEnd()), IsKeyEqual);
  unsigned Idx = I == schedClassEnd() ? 0 : std::distance(schedClassBegin(), I);
  // Idx == 0 is ambiguous (not found vs. matched the reserved class at 0), so
  // re-check class 0 explicitly before treating this as a miss.
  if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
    // Merge the new ProcIndices into the existing class (set union keeps the
    // list sorted and duplicate-free).
    IdxVec PI;
    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
                   SchedClasses[Idx].ProcIndices.end(),
                   ProcIndices.begin(), ProcIndices.end(),
                   std::back_inserter(PI));
    SchedClasses[Idx].ProcIndices = std::move(PI);
    return Idx;
  }
  // No match: append a brand-new class.
  Idx = SchedClasses.size();
  SchedClasses.emplace_back(Idx,
                            createSchedClassName(ItinClassDef, OperWrites,
                                                 OperReads),
                            ItinClassDef);
  CodeGenSchedClass &SC = SchedClasses.back();
  SC.Writes = OperWrites;
  SC.Reads = OperReads;
  SC.ProcIndices = ProcIndices;

  return Idx;
}

// Create classes for each set of opcodes that are in the same InstReadWrite
// definition across all processors.
void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
  // intersects with an existing class via a previous InstRWDef. Instrs that do
  // not intersect with an existing class refer back to their former class as
  // determined from ItinDef or SchedRW.
  SmallMapVector<unsigned, SmallVector<Record *, 8>, 4> ClassInstrs;
  // Sort Instrs into sets keyed by their current SchedClass index.
  const RecVec *InstDefs = Sets.expand(InstRWDef);
  if (InstDefs->empty())
    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");

  for (Record *InstDef : *InstDefs) {
    InstClassMapTy::const_iterator Pos = InstrClassMap.find(InstDef);
    if (Pos == InstrClassMap.end())
      PrintFatalError(InstDef->getLoc(), "No sched class for instruction.");
    unsigned SCIdx = Pos->second;
    ClassInstrs[SCIdx].push_back(InstDef);
  }
  // For each set of Instrs, create a new class if necessary, and map or remap
  // the Instrs to it.
  for (auto &Entry : ClassInstrs) {
    unsigned OldSCIdx = Entry.first;
    ArrayRef<Record*> InstDefs = Entry.second;
    // If the all instrs in the current class are accounted for, then leave
    // them mapped to their old class.
    if (OldSCIdx) {
      const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
      if (!RWDefs.empty()) {
        // Count how many instructions of the original InstRW are still mapped
        // to OldSCIdx; if all of this Entry's instrs account for them, the
        // class can simply be reused.
        const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
        unsigned OrigNumInstrs =
          count_if(*OrigInstDefs, [&](Record *OIDef) {
                     return InstrClassMap[OIDef] == OldSCIdx;
                   });
        if (OrigNumInstrs == InstDefs.size()) {
          assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
                 "expected a generic SchedClass");
          Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
          // Make sure we didn't already have a InstRW containing this
          // instruction on this model.
          for (Record *RWD : RWDefs) {
            if (RWD->getValueAsDef("SchedModel") == RWModelDef &&
                RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) {
              for (Record *Inst : InstDefs) {
                PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
                       Inst->getName() + " also matches " +
                       RWD->getValue("Instrs")->getValue()->getAsString());
              }
            }
          }
          LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
                            << SchedClasses[OldSCIdx].Name << " on "
                            << RWModelDef->getName() << "\n");
          SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
          continue;
        }
      }
    }
    // Otherwise split these instructions off into a new SchedClass.
    unsigned SCIdx = SchedClasses.size();
    SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
    CodeGenSchedClass &SC = SchedClasses.back();
    LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
                      << InstRWDef->getValueAsDef("SchedModel")->getName()
                      << "\n");

    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
    SC.Writes = SchedClasses[OldSCIdx].Writes;
    SC.Reads = SchedClasses[OldSCIdx].Reads;
    SC.ProcIndices.push_back(0);
    // If we had an old class, copy its InstRWs to this new class.
    if (OldSCIdx) {
      Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
      for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) {
        // Two InstRW defs for the same model matching the same instruction is
        // always a user error.
        if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) {
          for (Record *InstDef : InstDefs) {
            PrintFatalError(OldRWDef->getLoc(), "Overlapping InstRW def " +
                       InstDef->getName() + " also matches " +
                       OldRWDef->getValue("Instrs")->getValue()->getAsString());
          }
        }
        assert(OldRWDef != InstRWDef &&
               "SchedClass has duplicate InstRW def");
        SC.InstRWs.push_back(OldRWDef);
      }
    }
    // Map each Instr to this new class.
    for (Record *InstDef : InstDefs)
      InstrClassMap[InstDef] = SCIdx;
    SC.InstRWs.push_back(InstRWDef);
  }
}

// True if collectProcItins found anything.
bool CodeGenSchedModels::hasItineraries() const {
  for (const CodeGenProcModel &PM : make_range(procModelBegin(),procModelEnd()))
    if (PM.hasItineraries())
      return true;
  return false;
}

// Gather the processor itineraries: for each processor with itineraries, fill
// ItinDefList so it is indexed by SchedClass index.
void CodeGenSchedModels::collectProcItins() {
  LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
  for (CodeGenProcModel &ProcModel : ProcModels) {
    if (!ProcModel.hasItineraries())
      continue;

    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
    assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");

    // Populate ItinDefList with Itinerary records.
    ProcModel.ItinDefList.resize(NumInstrSchedClasses);

    // Insert each itinerary data record in the correct position within
    // the processor model's ItinDefList.
    for (Record *ItinData : ItinRecords) {
      const Record *ItinDef = ItinData->getValueAsDef("TheClass");
      bool FoundClass = false;

      for (const CodeGenSchedClass &SC :
           make_range(schedClassBegin(), schedClassEnd())) {
        // Multiple SchedClasses may share an itinerary. Update all of them.
        if (SC.ItinClassDef == ItinDef) {
          ProcModel.ItinDefList[SC.Index] = ItinData;
          FoundClass = true;
        }
      }
      if (!FoundClass) {
        LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
                          << " missing class for itinerary "
                          << ItinDef->getName() << '\n');
      }
    }
    // Check for missing itinerary entries.
    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
    LLVM_DEBUG(
        for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
          if (!ProcModel.ItinDefList[i])
            dbgs() << ProcModel.ItinsDef->getName()
                   << " missing itinerary for class " << SchedClasses[i].Name
                   << '\n';
        });
  }
}

// Gather the read/write types for each itinerary class: route every ItinRW
// def to the processor model it names.
void CodeGenSchedModels::collectProcItinRW() {
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  llvm::sort(ItinRWDefs, LessRecord());
  for (Record *RWDef  : ItinRWDefs) {
    if (!RWDef->getValueInit("SchedModel")->isComplete())
      PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
    Record *ModelDef = RWDef->getValueAsDef("SchedModel");
    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
    if (I == ProcModelMap.end()) {
      PrintFatalError(RWDef->getLoc(), "Undefined SchedMachineModel "
                      + ModelDef->getName());
    }
    ProcModels[I->second].ItinRWDefs.push_back(RWDef);
  }
}

// Gather the unsupported features for processor models.
void CodeGenSchedModels::collectProcUnsupportedFeatures() {
  for (CodeGenProcModel &ProcModel : ProcModels) {
    for (Record *Pred : ProcModel.ModelDef->getValueAsListOfDefs("UnsupportedFeatures")) {
       ProcModel.UnsupportedFeaturesDefs.push_back(Pred);
    }
  }
}

/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
  LLVM_DEBUG(
      dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
  LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");

  // Visit all existing classes and newly created classes. The loop bound is
  // re-evaluated each iteration because inference appends to SchedClasses.
  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
    assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");

    if (SchedClasses[Idx].ItinClassDef)
      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
    if (!SchedClasses[Idx].InstRWs.empty())
      inferFromInstRWs(Idx);
    if (!SchedClasses[Idx].Writes.empty()) {
      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
                  Idx, SchedClasses[Idx].ProcIndices);
    }
    // Sanity bound against runaway variant expansion.
    assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
           "too many SchedVariants");
  }
}

/// Infer classes from per-processor itinerary resources.
void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
                                            unsigned FromClassIdx) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (const Record *Rec : PM.ItinRWDefs) {
      RecVec Matched = Rec->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      // At most one ItinRW per (processor, itinerary class) pair.
      if (HasMatch)
        PrintFatalError(Rec->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      inferFromRW(Writes, Reads, FromClassIdx, PIdx);
    }
  }
}

/// Infer classes from per-processor InstReadWrite definitions.
void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
  // Iterate by index, not iterator: inferFromRW may reallocate SchedClasses.
  for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
    assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstrRWs was mutated!");
    Record *Rec = SchedClasses[SCIdx].InstRWs[I];
    const RecVec *InstDefs = Sets.expand(Rec);
    // Check whether at least one matched instruction is still mapped to this
    // class. NOTE(review): operator[] on InstrClassMap default-inserts a 0
    // entry for unseen defs — presumably all defs are already present; verify.
    RecIter II = InstDefs->begin(), IE = InstDefs->end();
    for (; II != IE; ++II) {
      if (InstrClassMap[*II] == SCIdx)
        break;
    }
    // If this class no longer has any instructions mapped to it, it has become
    // irrelevant.
    if (II == IE)
      continue;
    IdxVec Writes, Reads;
    findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
    unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
    inferFromRW(Writes, Reads, SCIdx, PIdx); // May mutate SchedClasses.
  }
}

namespace {

// Helper for substituteVariantOperand: one candidate variant (SchedVar) or
// aliased WriteSequence to expand onto a transition.
struct TransVariant {
  Record *VarOrSeqDef;  // Variant or sequence.
  unsigned RWIdx;       // Index of this variant or sequence's matched type.
  unsigned ProcIdx;     // Processor model index or zero for any.
  unsigned TransVecIdx; // Index into PredTransitions::TransVec.

  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
    VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
};

// Associate a predicate with the SchedReadWrite that it guards.
// RWIdx is the index of the read/write variant.
struct PredCheck {
  bool IsRead;
  unsigned RWIdx;
  Record *Predicate;

  PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
};

// A Predicate transition is a list of RW sequences guarded by a PredTerm.
struct PredTransition {
  // A predicate term is a conjunction of PredChecks.
  SmallVector<PredCheck, 4> PredTerm;
  // One inner vector per write (resp. read) operand.
  SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
  SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
  SmallVector<unsigned, 4> ProcIndices;
};

// Encapsulate a set of partially constructed transitions.
// The results are built by repeated calls to substituteVariants.
class PredTransitions {
  CodeGenSchedModels &SchedModels;

public:
  std::vector<PredTransition> TransVec;

  PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}

  void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
                                bool IsRead, unsigned StartIdx);

  void substituteVariants(const PredTransition &Trans);

#ifndef NDEBUG
  void dump() const;
#endif

private:
  bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
  void getIntersectingVariants(
    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
    std::vector<TransVariant> &IntersectingVariants);
  void pushVariant(const TransVariant &VInfo, bool IsRead);
};

} // end anonymous namespace

// Return true if this predicate is mutually exclusive with a PredTerm. This
// degenerates into checking if the predicate is mutually exclusive with any
// predicate in the Term's conjunction.
//
// All predicates associated with a given SchedRW are considered mutually
// exclusive. This should work even if the conditions expressed by the
// predicates are not exclusive because the predicates for a given SchedWrite
// are always checked in the order they are defined in the .td file. Later
// conditions implicitly negate any prior condition.
bool PredTransitions::mutuallyExclusive(Record *PredDef,
                                        ArrayRef<PredCheck> Term) {
  for (const PredCheck &PC: Term) {
    // A predicate is never exclusive with itself.
    if (PC.Predicate == PredDef)
      return false;

    // Exclusive if PredDef guards any variant of the same SchedRW.
    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(PC.RWIdx, PC.IsRead);
    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    if (any_of(Variants, [PredDef](const Record *R) {
          return R->getValueAsDef("Predicate") == PredDef;
        }))
      return true;
  }
  return false;
}

// Return true if RW (or anything it aliases to, transitively through
// sequences) has variants that may need expansion.
static bool hasAliasedVariants(const CodeGenSchedRW &RW,
                               CodeGenSchedModels &SchedModels) {
  if (RW.HasVariants)
    return true;

  for (Record *Alias : RW.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(Alias->getValueAsDef("AliasRW"));
    if (AliasRW.HasVariants)
      return true;
    if (AliasRW.IsSequence) {
      IdxVec ExpandedRWs;
      SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
      for (unsigned SI : ExpandedRWs) {
        if (hasAliasedVariants(SchedModels.getSchedRW(SI, AliasRW.IsRead),
                               SchedModels))
          return true;
      }
    }
  }
  return false;
}

// Return true if any RW in any sequence of any transition still has variants.
static bool hasVariant(ArrayRef<PredTransition> Transitions,
                       CodeGenSchedModels &SchedModels) {
  for (const PredTransition &PTI : Transitions) {
    for (const SmallVectorImpl<unsigned> &WSI : PTI.WriteSequences)
      for (unsigned WI : WSI)
        if (hasAliasedVariants(SchedModels.getSchedWrite(WI), SchedModels))
          return true;

    for (const SmallVectorImpl<unsigned> &RSI : PTI.ReadSequences)
      for (unsigned RI : RSI)
        if (hasAliasedVariants(SchedModels.getSchedRead(RI), SchedModels))
          return true;
  }
  return false;
}

// Populate IntersectingVariants with any variants or aliased sequences of the
// given SchedRW whose processor indices and predicates are not mutually
// exclusive with the given transition.
void PredTransitions::getIntersectingVariants(
  const CodeGenSchedRW &SchedRW, unsigned TransIdx,
  std::vector<TransVariant> &IntersectingVariants) {

  bool GenericRW = false;

  // First gather every candidate variant, from SchedRW itself and from its
  // aliases, recording which processor (if any) each is restricted to.
  std::vector<TransVariant> Variants;
  if (SchedRW.HasVariants) {
    unsigned VarProcIdx = 0;
    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    // Push each variant. Assign TransVecIdx later.
    const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    for (Record *VarDef : VarDefs)
      Variants.emplace_back(VarDef, SchedRW.Index, VarProcIdx, 0);
    if (VarProcIdx == 0)
      GenericRW = true;
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    // If either the SchedAlias itself or the SchedReadWrite that it aliases
    // to is defined within a processor model, constrain all variants to
    // that processor.
    unsigned AliasProcIdx = 0;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));

    if (AliasRW.HasVariants) {
      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
      for (Record *VD : VarDefs)
        Variants.emplace_back(VD, AliasRW.Index, AliasProcIdx, 0);
    }
    if (AliasRW.IsSequence)
      Variants.emplace_back(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0);
    if (AliasProcIdx == 0)
      GenericRW = true;
  }
  // Now filter the candidates against the transition's processors and
  // predicate term.
  for (TransVariant &Variant : Variants) {
    // Don't expand variants if the processor models don't intersect.
    // A zero processor index means any processor.
    SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
    if (ProcIndices[0] && Variant.ProcIdx) {
      unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
                                Variant.ProcIdx);
      if (!Cnt)
        continue;
      if (Cnt > 1) {
        const CodeGenProcModel &PM =
            *(SchedModels.procModelBegin() + Variant.ProcIdx);
        PrintFatalError(Variant.VarOrSeqDef->getLoc(),
                        "Multiple variants defined for processor " +
                        PM.ModelName +
                        " Ensure only one SchedAlias exists per RW.");
      }
    }
    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
      if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
        continue;
    }
    if (IntersectingVariants.empty()) {
      // The first variant builds on the existing transition.
      Variant.TransVecIdx = TransIdx;
      IntersectingVariants.push_back(Variant);
    }
    else {
      // Push another copy of the current transition for more variants.
      // NOTE: this grows TransVec, invalidating references into it; the
      // ProcIndices reference above is re-fetched each iteration.
      Variant.TransVecIdx = TransVec.size();
      IntersectingVariants.push_back(Variant);
      TransVec.push_back(TransVec[TransIdx]);
    }
  }
  if (GenericRW && IntersectingVariants.empty()) {
    PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
                    "a matching predicate on any processor");
  }
}

// Push the Reads/Writes selected by this variant onto the PredTransition
// specified by VInfo.
void PredTransitions::
pushVariant(const TransVariant &VInfo, bool IsRead) {
  PredTransition &Trans = TransVec[VInfo.TransVecIdx];

  // If this operand transition is reached through a processor-specific alias,
  // then the whole transition is specific to this processor.
  if (VInfo.ProcIdx != 0)
    Trans.ProcIndices.assign(1, VInfo.ProcIdx);

  // Collect the RWs this variant selects: a SchedVar contributes its
  // "Selected" list (and extends the predicate term); an aliased
  // WriteSequence contributes itself.
  IdxVec SelectedRWs;
  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
    Trans.PredTerm.emplace_back(IsRead, VInfo.RWIdx,PredDef);
    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
  }
  else {
    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
           "variant must be a SchedVariant or aliased WriteSequence");
    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
  }

  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);

  SmallVectorImpl<SmallVector<unsigned,4>> &RWSequences = IsRead
    ? Trans.ReadSequences : Trans.WriteSequences;
  if (SchedRW.IsVariadic) {
    unsigned OperIdx = RWSequences.size()-1;
    // Make N-1 copies of this transition's last sequence.
    RWSequences.insert(RWSequences.end(), SelectedRWs.size() - 1,
                       RWSequences[OperIdx]);
    // Push each of the N elements of the SelectedRWs onto a copy of the last
    // sequence (split the current operand into N operands).
    // Note that write sequences should be expanded within this loop--the entire
    // sequence belongs to a single operand.
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI, ++OperIdx) {
      IdxVec ExpandedRWs;
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
      RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
                                  ExpandedRWs.begin(), ExpandedRWs.end());
    }
    assert(OperIdx == RWSequences.size() && "missed a sequence");
  }
  else {
    // Push this transition's expanded sequence onto this transition's last
    // sequence (add to the current operand's sequence).
    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
    IdxVec ExpandedRWs;
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI) {
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
    }
    Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
  }
}

// RWSeq is a sequence of all Reads or all Writes for the next read or write
// operand. StartIdx is an index into TransVec where partial results
// starts. RWSeq must be applied to all transitions between StartIdx and the end
// of TransVec.
void PredTransitions::substituteVariantOperand(
    const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {

  // Visit each original RW within the current sequence.
  for (SmallVectorImpl<unsigned>::const_iterator
         RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
    // Push this RW on all partial PredTransitions or distribute variants.
    // New PredTransitions may be pushed within this loop which should not be
    // revisited (TransEnd must be loop invariant).
    for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
         TransIdx != TransEnd; ++TransIdx) {
      // In the common case, push RW onto the current operand's sequence.
      if (!hasAliasedVariants(SchedRW, SchedModels)) {
        if (IsRead)
          TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
        else
          TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
        continue;
      }
      // Distribute this partial PredTransition across intersecting variants.
      // This will push copies of TransVec[TransIdx] on the back of TransVec.
      std::vector<TransVariant> IntersectingVariants;
      getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
      // Now expand each variant on top of its copy of the transition.
      for (std::vector<TransVariant>::const_iterator
             IVI = IntersectingVariants.begin(),
             IVE = IntersectingVariants.end();
           IVI != IVE; ++IVI) {
        pushVariant(*IVI, IsRead);
      }
    }
  }
}

// For each variant of a Read/Write in Trans, substitute the sequence of
// Read/Writes guarded by the variant. This is exponential in the number of
// variant Read/Writes, but in practice detection of mutually exclusive
// predicates should result in linear growth in the total number variants.
//
// This is one step in a breadth-first search of nested variants.
void PredTransitions::substituteVariants(const PredTransition &Trans) {
  // Build up a set of partial results starting at the back of
  // PredTransitions. Remember the first new transition.
  unsigned StartIdx = TransVec.size();
  TransVec.emplace_back();
  TransVec.back().PredTerm = Trans.PredTerm;
  TransVec.back().ProcIndices = Trans.ProcIndices;

  // Visit each original write sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
         WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
       WSI != WSE; ++WSI) {
    // Push a new (empty) write sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->WriteSequences.emplace_back();
    }
    substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
  }
  // Visit each original read sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
         RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
       RSI != RSE; ++RSI) {
    // Push a new (empty) read sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->ReadSequences.emplace_back();
    }
    substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
  }
}

// Create a new SchedClass for each variant found by inferFromRW. Pass
static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
                                 unsigned FromClassIdx,
                                 CodeGenSchedModels &SchedModels) {
  // For each PredTransition, create a new CodeGenSchedTransition, which usually
  // requires creating a new SchedClass.
  for (ArrayRef<PredTransition>::iterator
         I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
    // Collapse each per-operand sequence into a single (possibly new) RW.
    IdxVec OperWritesVariant;
    transform(I->WriteSequences, std::back_inserter(OperWritesVariant),
              [&SchedModels](ArrayRef<unsigned> WS) {
                return SchedModels.findOrInsertRW(WS, /*IsRead=*/false);
              });
    IdxVec OperReadsVariant;
    transform(I->ReadSequences, std::back_inserter(OperReadsVariant),
              [&SchedModels](ArrayRef<unsigned> RS) {
                return SchedModels.findOrInsertRW(RS, /*IsRead=*/true);
              });
    CodeGenSchedTransition SCTrans;
    SCTrans.ToClassIdx =
        SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
                                  OperReadsVariant, I->ProcIndices);
    SCTrans.ProcIndices.assign(I->ProcIndices.begin(), I->ProcIndices.end());
    // The final PredTerm is unique set of predicates guarding the transition.
    RecVec Preds;
    transform(I->PredTerm, std::back_inserter(Preds),
              [](const PredCheck &P) {
                return P.Predicate;
              });
    // NOTE(review): std::unique only removes *adjacent* duplicates; this
    // assumes duplicate predicates appear consecutively in PredTerm — confirm.
    Preds.erase(std::unique(Preds.begin(), Preds.end()), Preds.end());
    SCTrans.PredTerm = std::move(Preds);
    SchedModels.getSchedClass(FromClassIdx)
        .Transitions.push_back(std::move(SCTrans));
  }
}

// Create new SchedClasses for the given ReadWrite list.
If any of the 1677 // ReadWrites refers to a SchedVariant, create a new SchedClass for each variant 1678 // of the ReadWrite list, following Aliases if necessary. 1679 void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites, 1680 ArrayRef<unsigned> OperReads, 1681 unsigned FromClassIdx, 1682 ArrayRef<unsigned> ProcIndices) { 1683 LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices); 1684 dbgs() << ") "); 1685 1686 // Create a seed transition with an empty PredTerm and the expanded sequences 1687 // of SchedWrites for the current SchedClass. 1688 std::vector<PredTransition> LastTransitions; 1689 LastTransitions.emplace_back(); 1690 LastTransitions.back().ProcIndices.append(ProcIndices.begin(), 1691 ProcIndices.end()); 1692 1693 for (unsigned WriteIdx : OperWrites) { 1694 IdxVec WriteSeq; 1695 expandRWSequence(WriteIdx, WriteSeq, /*IsRead=*/false); 1696 LastTransitions[0].WriteSequences.emplace_back(); 1697 SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back(); 1698 Seq.append(WriteSeq.begin(), WriteSeq.end()); 1699 LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") "); 1700 } 1701 LLVM_DEBUG(dbgs() << " Reads: "); 1702 for (unsigned ReadIdx : OperReads) { 1703 IdxVec ReadSeq; 1704 expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true); 1705 LastTransitions[0].ReadSequences.emplace_back(); 1706 SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back(); 1707 Seq.append(ReadSeq.begin(), ReadSeq.end()); 1708 LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") "); 1709 } 1710 LLVM_DEBUG(dbgs() << '\n'); 1711 1712 // Collect all PredTransitions for individual operands. 1713 // Iterate until no variant writes remain. 
  // Repeatedly substitute variants until the transition set reaches a fixed
  // point with no remaining variant writes/reads.
  while (hasVariant(LastTransitions, *this)) {
    PredTransitions Transitions(*this);
    for (const PredTransition &Trans : LastTransitions)
      Transitions.substituteVariants(Trans);
    LLVM_DEBUG(Transitions.dump());
    LastTransitions.swap(Transitions.TransVec);
  }
  // If the first transition has no variants, nothing to do.
  if (LastTransitions[0].PredTerm.empty())
    return;

  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
  inferFromTransitions(LastTransitions, FromClassIdx, *this);
}

// Check if any processor resource group contains all resource records in
// SubUnits.
bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    // Only ProcResGroup records can act as a supergroup.
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec SuperUnits =
        PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    // A candidate qualifies only if every SubUnit appears in its Resources.
    RecIter RI = SubUnits.begin(), RE = SubUnits.end();
    for ( ; RI != RE; ++RI) {
      if (!is_contained(SuperUnits, *RI)) {
        break;
      }
    }
    if (RI == RE)
      return true;
  }
  return false;
}

// Verify that overlapping groups have a common supergroup.
// Report a fatal error if two ProcResGroups of the same processor model share
// a resource unit but no single group contains both groups' units.
void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec CheckUnits =
        PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    // Compare against every later group (earlier pairs already checked).
    for (unsigned j = i+1; j < e; ++j) {
      if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
        continue;
      RecVec OtherUnits =
          PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
      if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
                             OtherUnits.begin(), OtherUnits.end())
          != CheckUnits.end()) {
        // CheckUnits and OtherUnits overlap: the union of both unit lists must
        // be covered by some existing supergroup.
        OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(),
                          CheckUnits.end());
        if (!hasSuperGroup(OtherUnits, PM)) {
          PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
                          "proc resource group overlaps with "
                          + PM.ProcResourceDefs[j]->getName()
                          + " but no supergroup contains both.");
        }
      }
    }
  }
}

// Collect all the RegisterFile definitions available in this target.
void CodeGenSchedModels::collectRegisterFiles() {
  RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile");

  // RegisterFiles is the vector of CodeGenRegisterFile.
  for (Record *RF : RegisterFileDefs) {
    // For each register file definition, construct a CodeGenRegisterFile object
    // and add it to the appropriate scheduling model.
    CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel"));
    PM.RegisterFiles.emplace_back(CodeGenRegisterFile(RF->getName(),RF));
    CodeGenRegisterFile &CGRF = PM.RegisterFiles.back();
    CGRF.MaxMovesEliminatedPerCycle =
        RF->getValueAsInt("MaxMovesEliminatedPerCycle");
    CGRF.AllowZeroMoveEliminationOnly =
        RF->getValueAsBit("AllowZeroMoveEliminationOnly");

    // Now set the number of physical registers as well as the cost of registers
    // in each register class.
    CGRF.NumPhysRegs = RF->getValueAsInt("NumPhysRegs");
    if (!CGRF.NumPhysRegs) {
      PrintFatalError(RF->getLoc(),
                      "Invalid RegisterFile with zero physical registers");
    }

    RecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses");
    std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts");
    ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination");
    for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) {
      // Classes beyond the end of RegCosts default to a cost of 1.
      int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1;

      // Move elimination defaults to disallowed when AllowMoveElimination is
      // shorter than RegClasses.
      bool AllowMoveElim = false;
      if (MoveElimInfo->size() > I) {
        BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
        AllowMoveElim = Val->getValue();
      }

      CGRF.Costs.emplace_back(RegisterClasses[I], Cost, AllowMoveElim);
    }
  }
}

// Collect and sort WriteRes, ReadAdvance, and ProcResources.
void CodeGenSchedModels::collectProcResources() {
  ProcResourceDefs = Records.getAllDerivedDefinitions("ProcResourceUnits");
  ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");

  // Add any subtarget-specific SchedReadWrites that are directly associated
  // with processor resources. Refer to the parent SchedClass's ProcIndices to
  // determine which processors they apply to.
  for (const CodeGenSchedClass &SC :
       make_range(schedClassBegin(), schedClassEnd())) {
    // Itinerary-based classes collect their resources separately.
    if (SC.ItinClassDef) {
      collectItinProcResources(SC.ItinClassDef);
      continue;
    }

    // This class may have a default ReadWrite list which can be overriden by
    // InstRW definitions.
    for (Record *RW : SC.InstRWs) {
      Record *RWModelDef = RW->getValueAsDef("SchedModel");
      unsigned PIdx = getProcModel(RWModelDef).Index;
      IdxVec Writes, Reads;
      findRWs(RW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      collectRWResources(Writes, Reads, PIdx);
    }

    collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices);
  }
  // Add resources separately defined by each subtarget.
  RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
  for (Record *WR : WRDefs) {
    Record *ModelDef = WR->getValueAsDef("SchedModel");
    addWriteRes(WR, getProcModel(ModelDef).Index);
  }
  RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
  for (Record *SWR : SWRDefs) {
    Record *ModelDef = SWR->getValueAsDef("SchedModel");
    addWriteRes(SWR, getProcModel(ModelDef).Index);
  }
  RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
  for (Record *RA : RADefs) {
    Record *ModelDef = RA->getValueAsDef("SchedModel");
    addReadAdvance(RA, getProcModel(ModelDef).Index);
  }
  RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
  for (Record *SRA : SRADefs) {
    // Unlike ReadAdvance above, a SchedReadAdvance may leave SchedModel unset;
    // only those bound to a concrete model are added here.
    if (SRA->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SRA->getValueAsDef("SchedModel");
      addReadAdvance(SRA, getProcModel(ModelDef).Index);
    }
  }
  // Add ProcResGroups that are defined within this processor model, which may
  // not be directly referenced but may directly specify a buffer size.
  // NOTE(review): this local shadows the ProcResGroups member initialized from
  // the same query at the top of this function; the re-query is redundant but
  // harmless.
  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
  for (Record *PRG : ProcResGroups) {
    // Groups without a concrete SchedModel binding cannot be attributed to a
    // processor model here.
    if (!PRG->getValueInit("SchedModel")->isComplete())
      continue;
    CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel"));
    if (!is_contained(PM.ProcResourceDefs, PRG))
      PM.ProcResourceDefs.push_back(PRG);
  }
  // Add ProcResourceUnits unconditionally.
  for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) {
    if (!PRU->getValueInit("SchedModel")->isComplete())
      continue;
    CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel"));
    if (!is_contained(PM.ProcResourceDefs, PRU))
      PM.ProcResourceDefs.push_back(PRU);
  }
  // Finalize each ProcModel by sorting the record arrays.
  for (CodeGenProcModel &PM : ProcModels) {
    llvm::sort(PM.WriteResDefs, LessRecord());
    llvm::sort(PM.ReadAdvanceDefs, LessRecord());
    llvm::sort(PM.ProcResourceDefs, LessRecord());
    LLVM_DEBUG(
        PM.dump();
        dbgs() << "WriteResDefs: "; for (RecIter RI = PM.WriteResDefs.begin(),
                                                RE = PM.WriteResDefs.end();
                                         RI != RE; ++RI) {
          if ((*RI)->isSubClassOf("WriteRes"))
            dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
          else
            dbgs() << (*RI)->getName() << " ";
        } dbgs() << "\nReadAdvanceDefs: ";
        for (RecIter RI = PM.ReadAdvanceDefs.begin(),
                     RE = PM.ReadAdvanceDefs.end();
             RI != RE; ++RI) {
          if ((*RI)->isSubClassOf("ReadAdvance"))
            dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
          else
            dbgs() << (*RI)->getName() << " ";
        } dbgs()
        << "\nProcResourceDefs: ";
        for (RecIter RI = PM.ProcResourceDefs.begin(),
                     RE = PM.ProcResourceDefs.end();
             RI != RE; ++RI) { dbgs() << (*RI)->getName() << " "; } dbgs()
        << '\n');
    verifyProcResourceGroups(PM);
  }

  // The temporary record lists are only needed while collecting; clear them so
  // stale pointers are not consulted later.
  ProcResourceDefs.clear();
  ProcResGroups.clear();
}

/// Verify that every processor model marked with 'CompleteModel' provides
/// scheduling information (SchedRW, a usable itinerary, or a matching InstRW)
/// for every instruction that is neither hasNoSchedulingInfo nor listed as
/// unsupported. Emits per-instruction errors and dies if anything is missing.
void
CodeGenSchedModels::checkCompleteness() {
  bool Complete = true;
  bool HadCompleteModel = false;
  for (const CodeGenProcModel &ProcModel : procModels()) {
    const bool HasItineraries = ProcModel.hasItineraries();
    if (!ProcModel.ModelDef->getValueAsBit("CompleteModel"))
      continue;
    for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
      if (Inst->hasNoSchedulingInfo)
        continue;
      if (ProcModel.isUnsupported(*Inst))
        continue;
      unsigned SCIdx = getSchedClassIdx(*Inst);
      if (!SCIdx) {
        // Report a missing SchedRW only against the first complete model so
        // the same instruction is not diagnosed once per processor.
        if (Inst->TheDef->isValueUnset("SchedRW") && !HadCompleteModel) {
          PrintError("No schedule information for instruction '"
                     + Inst->TheDef->getName() + "'");
          Complete = false;
        }
        continue;
      }

      const CodeGenSchedClass &SC = getSchedClass(SCIdx);
      if (!SC.Writes.empty())
        continue;
      if (HasItineraries && SC.ItinClassDef != nullptr &&
          SC.ItinClassDef->getName() != "NoItinerary")
        continue;

      // No writes and no usable itinerary: this model must supply an InstRW
      // for the class.
      const RecVec &InstRWs = SC.InstRWs;
      auto I = find_if(InstRWs, [&ProcModel](const Record *R) {
        return R->getValueAsDef("SchedModel") == ProcModel.ModelDef;
      });
      if (I == InstRWs.end()) {
        PrintError("'" + ProcModel.ModelName + "' lacks information for '" +
                   Inst->TheDef->getName() + "'");
        Complete = false;
      }
    }
    HadCompleteModel = true;
  }
  if (!Complete) {
    errs() << "\n\nIncomplete schedule models found.\n"
      << "- Consider setting 'CompleteModel = 0' while developing new models.\n"
      << "- Pseudo instructions can be marked with 'hasNoSchedulingInfo = 1'.\n"
      << "- Instructions should usually have Sched<[...]> as a superclass, "
         "you may temporarily use an empty list.\n"
      << "- Instructions related to unsupported features can be excluded with "
         "list<Predicate> UnsupportedFeatures = [HasA,..,HasY]; in the "
         "processor model.\n\n";
    PrintFatalError("Incomplete schedule model");
  }
}

// Collect itinerary class resources for each processor.
void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
         II != IE; ++II) {
      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      // At most one ItinRW per model may match a given itinerary class.
      if (HasMatch)
        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      collectRWResources(Writes, Reads, PIdx);
    }
  }
}

// Collect resources for one SchedReadWrite, recursing through its aliases.
void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
                                            ArrayRef<unsigned> ProcIndices) {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (SchedRW.TheDef) {
    if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
      for (unsigned Idx : ProcIndices)
        addWriteRes(SchedRW.TheDef, Idx);
    }
    else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
      for (unsigned Idx : ProcIndices)
        addReadAdvance(SchedRW.TheDef, Idx);
    }
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    IdxVec AliasProcIndices;
    // An alias bound to a specific SchedModel applies only to that processor;
    // otherwise it inherits the caller's processor set.
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      AliasProcIndices.push_back(
          getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
    }
    else
      AliasProcIndices = ProcIndices;
    const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
    assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");

    IdxVec ExpandedRWs;
    expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
    for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
         SI != SE; ++SI) {
      collectRWResources(*SI, IsRead, AliasProcIndices);
    }
  }
}

// Collect resources for a set of read/write types and processor indices.
void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes,
                                            ArrayRef<unsigned> Reads,
                                            ArrayRef<unsigned> ProcIndices) {
  for (unsigned Idx : Writes)
    collectRWResources(Idx, /*IsRead=*/false, ProcIndices);

  for (unsigned Idx : Reads)
    collectRWResources(Idx, /*IsRead=*/true, ProcIndices);
}

// Find the processor's resource units for this kind of resource.
Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
                                             const CodeGenProcModel &PM,
                                             ArrayRef<SMLoc> Loc) const {
  // A ProcResourceUnits record is its own unit definition.
  if (ProcResKind->isSubClassOf("ProcResourceUnits"))
    return ProcResKind;

  Record *ProcUnitDef = nullptr;
  assert(!ProcResourceDefs.empty());
  assert(!ProcResGroups.empty());

  // Search concrete resource units whose Kind matches for this model; more
  // than one match is a TableGen authoring error.
  for (Record *ProcResDef : ProcResourceDefs) {
    if (ProcResDef->getValueAsDef("Kind") == ProcResKind
        && ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError(Loc,
                        "Multiple ProcessorResourceUnits associated with "
                        + ProcResKind->getName());
      }
      ProcUnitDef = ProcResDef;
    }
  }
  // A ProcResGroup matches by identity rather than by Kind.
  for (Record *ProcResGroup : ProcResGroups) {
    if (ProcResGroup == ProcResKind
        && ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError(Loc,
                        "Multiple ProcessorResourceUnits associated with "
                        + ProcResKind->getName());
      }
      ProcUnitDef = ProcResGroup;
    }
  }
  if (!ProcUnitDef) {
    PrintFatalError(Loc,
                    "No ProcessorResources associated with "
                    + ProcResKind->getName());
  }
  return ProcUnitDef;
}

2086 // Iteratively add a resource and its super resources. 2087 void CodeGenSchedModels::addProcResource(Record *ProcResKind, 2088 CodeGenProcModel &PM, 2089 ArrayRef<SMLoc> Loc) { 2090 while (true) { 2091 Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc); 2092 2093 // See if this ProcResource is already associated with this processor. 2094 if (is_contained(PM.ProcResourceDefs, ProcResUnits)) 2095 return; 2096 2097 PM.ProcResourceDefs.push_back(ProcResUnits); 2098 if (ProcResUnits->isSubClassOf("ProcResGroup")) 2099 return; 2100 2101 if (!ProcResUnits->getValueInit("Super")->isComplete()) 2102 return; 2103 2104 ProcResKind = ProcResUnits->getValueAsDef("Super"); 2105 } 2106 } 2107 2108 // Add resources for a SchedWrite to this processor if they don't exist. 2109 void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) { 2110 assert(PIdx && "don't add resources to an invalid Processor model"); 2111 2112 RecVec &WRDefs = ProcModels[PIdx].WriteResDefs; 2113 if (is_contained(WRDefs, ProcWriteResDef)) 2114 return; 2115 WRDefs.push_back(ProcWriteResDef); 2116 2117 // Visit ProcResourceKinds referenced by the newly discovered WriteRes. 2118 RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources"); 2119 for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end(); 2120 WritePRI != WritePRE; ++WritePRI) { 2121 addProcResource(*WritePRI, ProcModels[PIdx], ProcWriteResDef->getLoc()); 2122 } 2123 } 2124 2125 // Add resources for a ReadAdvance to this processor if they don't exist. 
// Record a ReadAdvance on this processor model, skipping duplicates.
void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
                                        unsigned PIdx) {
  RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
  if (is_contained(RADefs, ProcReadAdvanceDef))
    return;
  RADefs.push_back(ProcReadAdvanceDef);
}

// Return the 1-based position of PRDef in this model's ProcResourceDefs;
// fatal error if the resource was never added to the model.
unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
  RecIter PRPos = find(ProcResourceDefs, PRDef);
  if (PRPos == ProcResourceDefs.end())
    PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
                    "the ProcResources list for " + ModelName);
  // Idx=0 is reserved for invalid.
  return 1 + (PRPos - ProcResourceDefs.begin());
}

// An instruction is unsupported on this model if any of its Predicates is
// named in the model's UnsupportedFeatures list (matched by record name).
bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const {
  for (const Record *TheDef : UnsupportedFeaturesDefs) {
    for (const Record *PredDef : Inst.TheDef->getValueAsListOfDefs("Predicates")) {
      if (TheDef->getName() == PredDef->getName())
        return true;
    }
  }
  return false;
}

#ifndef NDEBUG
// Print a one-line summary of this processor model.
void CodeGenProcModel::dump() const {
  dbgs() << Index << ": " << ModelName << " "
         << (ModelDef ? ModelDef->getName() : "inferred") << " "
         << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
}

// Print this SchedRW's name, variadic marker, and expanded sequence (if any).
void CodeGenSchedRW::dump() const {
  dbgs() << Name << (IsVariadic ? " (V) " : " ");
  if (IsSequence) {
    dbgs() << "(";
    dumpIdxVec(Sequence);
    dbgs() << ")";
  }
}

// Print this SchedClass: its writes, reads, processor indices, and any
// predicate-guarded transitions.
void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
  dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
         << "  Writes: ";
  for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
    SchedModels->getSchedWrite(Writes[i]).dump();
    if (i < N-1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n  Reads: ";
  for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
    SchedModels->getSchedRead(Reads[i]).dump();
    if (i < N-1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n  ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
  if (!Transitions.empty()) {
    dbgs() << "\n Transitions for Proc ";
    for (const CodeGenSchedTransition &Transition : Transitions) {
      dumpIdxVec(Transition.ProcIndices);
    }
  }
}

// Print every expanded transition: its predicate term followed by the write
// sequences it selects.
void PredTransitions::dump() const {
  dbgs() << "Expanded Variants:\n";
  for (std::vector<PredTransition>::const_iterator
         TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
    dbgs() << "{";
    for (SmallVectorImpl<PredCheck>::const_iterator
           PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
         PCI != PCE; ++PCI) {
      if (PCI != TI->PredTerm.begin())
        dbgs() << ", ";
      dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
             << ":" << PCI->Predicate->getName();
    }
    dbgs() << "},\n  => {";
    for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
           WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
         WSI != WSE; ++WSI) {
      dbgs() << "(";
      for (SmallVectorImpl<unsigned>::const_iterator
             WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
        if (WI != WSI->begin())
          dbgs() << ", ";
        dbgs() << SchedModels.getSchedWrite(*WI).Name;
      }
      dbgs() << "),";
    }
    dbgs() << "}\n";
  }
}
#endif // NDEBUG