//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//

#include "CodeGenSchedule.h"
#include "CodeGenInstruction.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Error.h"
#include <algorithm>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "subtarget-emitter"

#ifndef NDEBUG
static void dumpIdxVec(ArrayRef<unsigned> V) {
  for (unsigned Idx : V)
    dbgs() << Idx << ", ";
}
#endif

namespace {

// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
struct InstrsOp : public SetTheory::Operator {
  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
  }
};

// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
struct InstRegexOp : public SetTheory::Operator {
  const CodeGenTarget &Target;
  InstRegexOp(const CodeGenTarget &t): Target(t) {}

  /// Remove any text inside of parentheses from S.
  static std::string removeParens(llvm::StringRef S) {
    std::string Result;
    unsigned Paren = 0;
    // NB: We don't care about escaped parens here.
    for (char C : S) {
      switch (C) {
      case '(':
        ++Paren;
        break;
      case ')':
        --Paren;
        break;
      default:
        if (Paren == 0)
          Result += C;
      }
    }
    return Result;
  }

  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ArrayRef<const CodeGenInstruction *> Instructions =
        Target.getInstructionsByEnumValue();

    unsigned NumGeneric = Target.getNumFixedInstructions();
    unsigned NumPseudos = Target.getNumPseudoInstructions();
    auto Generics = Instructions.slice(0, NumGeneric);
    auto Pseudos = Instructions.slice(NumGeneric, NumPseudos);
    auto NonPseudos = Instructions.slice(NumGeneric + NumPseudos);

    for (Init *Arg : make_range(Expr->arg_begin(), Expr->arg_end())) {
      StringInit *SI = dyn_cast<StringInit>(Arg);
      if (!SI)
        PrintFatalError(Loc, "instregex requires pattern string: " +
                                 Expr->getAsString());
      StringRef Original = SI->getValue();

      // Extract a prefix that we can binary search on.
      static const char RegexMetachars[] = "()^$|*+?.[]\\{}";
      auto FirstMeta = Original.find_first_of(RegexMetachars);

      // Look for top-level | or ?. We cannot optimize them to binary search.
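      // For illustration (hypothetical patterns): "MOV8rr|MOV16rr" has a
      // top-level '|', so the whole pattern must be handled by the regex with
      // no usable prefix, while "VPADD(B|W)$" keeps "VPADD" as a
      // binary-searchable prefix because the alternation is nested inside
      // parentheses.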
      if (removeParens(Original).find_first_of("|?") != std::string::npos)
        FirstMeta = 0;

      Optional<Regex> Regexpr = None;
      StringRef Prefix = Original.substr(0, FirstMeta);
      StringRef PatStr = Original.substr(FirstMeta);
      if (!PatStr.empty()) {
        // For the rest use a python-style prefix match.
        std::string pat = PatStr;
        if (pat[0] != '^') {
          pat.insert(0, "^(");
          pat.insert(pat.end(), ')');
        }
        Regexpr = Regex(pat);
      }

      int NumMatches = 0;

      // The generic opcodes are unsorted; handle them manually.
      for (auto *Inst : Generics) {
        StringRef InstName = Inst->TheDef->getName();
        if (InstName.startswith(Prefix) &&
            (!Regexpr || Regexpr->match(InstName.substr(Prefix.size())))) {
          Elts.insert(Inst->TheDef);
          NumMatches++;
        }
      }

      // Target instructions are split into two ranges: pseudo instructions
      // first, then non-pseudos. Each range is sorted in lexicographical order
      // by name. Find the sub-ranges that start with our prefix.
      struct Comp {
        bool operator()(const CodeGenInstruction *LHS, StringRef RHS) {
          return LHS->TheDef->getName() < RHS;
        }
        bool operator()(StringRef LHS, const CodeGenInstruction *RHS) {
          return LHS < RHS->TheDef->getName() &&
                 !RHS->TheDef->getName().startswith(LHS);
        }
      };
      auto Range1 =
          std::equal_range(Pseudos.begin(), Pseudos.end(), Prefix, Comp());
      auto Range2 = std::equal_range(NonPseudos.begin(), NonPseudos.end(),
                                     Prefix, Comp());

      // For these ranges we know that instruction names start with the prefix.
      // Check if there's a regex that needs to be checked.
      const auto HandleNonGeneric = [&](const CodeGenInstruction *Inst) {
        StringRef InstName = Inst->TheDef->getName();
        if (!Regexpr || Regexpr->match(InstName.substr(Prefix.size()))) {
          Elts.insert(Inst->TheDef);
          NumMatches++;
        }
      };
      std::for_each(Range1.first, Range1.second, HandleNonGeneric);
      std::for_each(Range2.first, Range2.second, HandleNonGeneric);

      if (0 == NumMatches)
        PrintFatalError(Loc, "instregex has no matches: " + Original);
    }
  }
};

} // end anonymous namespace

/// CodeGenSchedModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
                                       const CodeGenTarget &TGT):
  Records(RK), Target(TGT) {

  Sets.addFieldExpander("InstRW", "Instrs");

  // Allow Set evaluation to recognize the dags used in InstRW records:
  // (instrs Op1, Op2, ...)
  Sets.addOperator("instrs", llvm::make_unique<InstrsOp>());
  Sets.addOperator("instregex", llvm::make_unique<InstRegexOp>(Target));

  // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
  // that are explicitly referenced in tablegen records. Resources associated
  // with each processor will be derived later. Populate ProcModelMap with the
  // CodeGenProcModel instances.
  collectProcModels();

  // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
  // defined, and populate SchedReads and SchedWrites vectors. Implicit
  // SchedReadWrites that represent sequences derived from expanded variants
  // will be inferred later.
  collectSchedRW();

  // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
  // required by an instruction definition, and populate SchedClassIdxMap. Set
  // NumItineraryClasses to the number of explicit itinerary classes referenced
  // by instructions. Set NumInstrSchedClasses to the number of itinerary
  // classes plus any classes implied by instructions that derive from class
  // Sched and provide a SchedRW list. This does not infer any new classes from
  // SchedVariant.
  collectSchedClasses();

  // Find instruction itineraries for each processor. Sort and populate
  // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
  // all itinerary classes to be discovered.
  collectProcItins();

  // Find ItinRW records for each processor and itinerary class.
  // (For per-operand resources mapped to itinerary classes).
  collectProcItinRW();

  // Find UnsupportedFeatures records for each processor.
  collectProcUnsupportedFeatures();

  // Infer new SchedClasses from SchedVariant.
  inferSchedClasses();

  // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
  // ProcResourceDefs.
  LLVM_DEBUG(
      dbgs() << "\n+++ RESOURCE DEFINITIONS (collectProcResources) +++\n");
  collectProcResources();

  // Collect optional processor description.
  collectOptionalProcessorInfo();

  // Check MCInstPredicate definitions.
  checkMCInstPredicates();

  // Check STIPredicate definitions.
  checkSTIPredicates();

  // Find STIPredicate definitions for each processor model, and construct
  // STIPredicateFunction objects.
  collectSTIPredicates();

  checkCompleteness();
}

void CodeGenSchedModels::checkSTIPredicates() const {
  DenseMap<StringRef, const Record *> Declarations;

  // There cannot be multiple declarations with the same name.
  const RecVec Decls = Records.getAllDerivedDefinitions("STIPredicateDecl");
  for (const Record *R : Decls) {
    StringRef Name = R->getValueAsString("Name");
    const auto It = Declarations.find(Name);
    if (It == Declarations.end()) {
      Declarations[Name] = R;
      continue;
    }

    PrintError(R->getLoc(), "STIPredicate " + Name + " multiply declared.");
    PrintNote(It->second->getLoc(), "Previous declaration was here.");
    PrintFatalError(R->getLoc(), "Invalid STIPredicateDecl found.");
  }

  // Disallow InstructionEquivalenceClasses with an empty instruction list.
  const RecVec Defs =
      Records.getAllDerivedDefinitions("InstructionEquivalenceClass");
  for (const Record *R : Defs) {
    RecVec Opcodes = R->getValueAsListOfDefs("Opcodes");
    if (Opcodes.empty()) {
      PrintFatalError(R->getLoc(), "Invalid InstructionEquivalenceClass "
                                   "defined with an empty opcode list.");
    }
  }
}

// Used by function `processSTIPredicate` to construct a mask of machine
// instruction operands.
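// For illustration (hypothetical input): Indices = {0, 2} produces a 3-bit
// mask with bits 0 and 2 set, i.e. 0b101.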
static APInt constructOperandMask(ArrayRef<int64_t> Indices) {
  APInt OperandMask;
  if (Indices.empty())
    return OperandMask;

  int64_t MaxIndex = *std::max_element(Indices.begin(), Indices.end());
  assert(MaxIndex >= 0 && "Invalid negative indices in input!");
  OperandMask = OperandMask.zext(MaxIndex + 1);
  for (const int64_t Index : Indices) {
    assert(Index >= 0 && "Invalid negative indices!");
    OperandMask.setBit(Index);
  }

  return OperandMask;
}

static void
processSTIPredicate(STIPredicateFunction &Fn,
                    const DenseMap<Record *, unsigned> &ProcModelMap) {
  DenseMap<const Record *, unsigned> Opcode2Index;
  using OpcodeMapPair = std::pair<const Record *, OpcodeInfo>;
  std::vector<OpcodeMapPair> OpcodeMappings;
  std::vector<std::pair<APInt, APInt>> OpcodeMasks;

  DenseMap<const Record *, unsigned> Predicate2Index;
  unsigned NumUniquePredicates = 0;

  // Number unique predicates and opcodes used by InstructionEquivalenceClass
  // definitions. Each unique opcode will be associated with an OpcodeInfo
  // object.
  for (const Record *Def : Fn.getDefinitions()) {
    RecVec Classes = Def->getValueAsListOfDefs("Classes");
    for (const Record *EC : Classes) {
      const Record *Pred = EC->getValueAsDef("Predicate");
      if (Predicate2Index.find(Pred) == Predicate2Index.end())
        Predicate2Index[Pred] = NumUniquePredicates++;

      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");
      for (const Record *Opcode : Opcodes) {
        if (Opcode2Index.find(Opcode) == Opcode2Index.end()) {
          Opcode2Index[Opcode] = OpcodeMappings.size();
          OpcodeMappings.emplace_back(Opcode, OpcodeInfo());
        }
      }
    }
  }

  // Initialize vector `OpcodeMasks` with default values. We want to keep track
  // of which processors "use" which opcodes. We also want to be able to
  // identify predicates that are used by different processors for the same
  // opcode.
  // This information is used later on by this algorithm to sort OpcodeMapping
  // elements based on their processor and predicate sets.
  OpcodeMasks.resize(OpcodeMappings.size());
  APInt DefaultProcMask(ProcModelMap.size(), 0);
  APInt DefaultPredMask(NumUniquePredicates, 0);
  for (std::pair<APInt, APInt> &MaskPair : OpcodeMasks)
    MaskPair = std::make_pair(DefaultProcMask, DefaultPredMask);

  // Construct an OpcodeInfo object for every unique opcode declared by an
  // InstructionEquivalenceClass definition.
  for (const Record *Def : Fn.getDefinitions()) {
    RecVec Classes = Def->getValueAsListOfDefs("Classes");
    const Record *SchedModel = Def->getValueAsDef("SchedModel");
    unsigned ProcIndex = ProcModelMap.find(SchedModel)->second;
    APInt ProcMask(ProcModelMap.size(), 0);
    ProcMask.setBit(ProcIndex);

    for (const Record *EC : Classes) {
      RecVec Opcodes = EC->getValueAsListOfDefs("Opcodes");

      std::vector<int64_t> OpIndices =
          EC->getValueAsListOfInts("OperandIndices");
      APInt OperandMask = constructOperandMask(OpIndices);

      const Record *Pred = EC->getValueAsDef("Predicate");
      APInt PredMask(NumUniquePredicates, 0);
      PredMask.setBit(Predicate2Index[Pred]);

      for (const Record *Opcode : Opcodes) {
        unsigned OpcodeIdx = Opcode2Index[Opcode];
        if (OpcodeMasks[OpcodeIdx].first[ProcIndex]) {
          std::string Message =
              "Opcode " + Opcode->getName().str() +
              " used by multiple InstructionEquivalenceClass definitions.";
          PrintFatalError(EC->getLoc(), Message);
        }
        OpcodeMasks[OpcodeIdx].first |= ProcMask;
        OpcodeMasks[OpcodeIdx].second |= PredMask;
        OpcodeInfo &OI = OpcodeMappings[OpcodeIdx].second;

        OI.addPredicateForProcModel(ProcMask, OperandMask, Pred);
      }
    }
  }

  // Sort OpcodeMappings elements based on their CPU and predicate masks.
  // As a last resort, order elements by opcode identifier.
  llvm::sort(OpcodeMappings,
             [&](const OpcodeMapPair &Lhs, const OpcodeMapPair &Rhs) {
               unsigned LhsIdx = Opcode2Index[Lhs.first];
               unsigned RhsIdx = Opcode2Index[Rhs.first];
               std::pair<APInt, APInt> &LhsMasks = OpcodeMasks[LhsIdx];
               std::pair<APInt, APInt> &RhsMasks = OpcodeMasks[RhsIdx];

               if (LhsMasks.first != RhsMasks.first) {
                 if (LhsMasks.first.countPopulation() <
                     RhsMasks.first.countPopulation())
                   return true;
                 return LhsMasks.first.countLeadingZeros() >
                        RhsMasks.first.countLeadingZeros();
               }

               if (LhsMasks.second != RhsMasks.second) {
                 if (LhsMasks.second.countPopulation() <
                     RhsMasks.second.countPopulation())
                   return true;
                 return LhsMasks.second.countLeadingZeros() >
                        RhsMasks.second.countLeadingZeros();
               }

               return LhsIdx < RhsIdx;
             });

  // Now construct opcode groups. Groups are used by the SubtargetEmitter when
  // expanding the body of a STIPredicate function. In particular, each opcode
  // group is expanded into a sequence of labels in a switch statement.
  // A group identifies opcodes for which different processors define the same
  // predicates and the same opcode masks.
  for (OpcodeMapPair &Info : OpcodeMappings)
    Fn.addOpcode(Info.first, std::move(Info.second));
}

void CodeGenSchedModels::collectSTIPredicates() {
  // Map STIPredicateDecl records to elements of vector
  // CodeGenSchedModels::STIPredicates.
  DenseMap<const Record *, unsigned> Decl2Index;

  RecVec RV = Records.getAllDerivedDefinitions("STIPredicate");
  for (const Record *R : RV) {
    const Record *Decl = R->getValueAsDef("Declaration");

    const auto It = Decl2Index.find(Decl);
    if (It == Decl2Index.end()) {
      Decl2Index[Decl] = STIPredicates.size();
      STIPredicateFunction Predicate(Decl);
      Predicate.addDefinition(R);
      STIPredicates.emplace_back(std::move(Predicate));
      continue;
    }

    STIPredicateFunction &PreviousDef = STIPredicates[It->second];
    PreviousDef.addDefinition(R);
  }

  for (STIPredicateFunction &Fn : STIPredicates)
    processSTIPredicate(Fn, ProcModelMap);
}

void OpcodeInfo::addPredicateForProcModel(const llvm::APInt &CpuMask,
                                          const llvm::APInt &OperandMask,
                                          const Record *Predicate) {
  auto It = llvm::find_if(
      Predicates, [&OperandMask, &Predicate](const PredicateInfo &P) {
        return P.Predicate == Predicate && P.OperandMask == OperandMask;
      });
  if (It == Predicates.end()) {
    Predicates.emplace_back(CpuMask, OperandMask, Predicate);
    return;
  }
  It->ProcModelMask |= CpuMask;
}

void CodeGenSchedModels::checkMCInstPredicates() const {
  RecVec MCPredicates = Records.getAllDerivedDefinitions("TIIPredicate");
  if (MCPredicates.empty())
    return;

  // A target cannot have multiple TIIPredicate definitions with the same name.
  llvm::StringMap<const Record *> TIIPredicates(MCPredicates.size());
  for (const Record *TIIPred : MCPredicates) {
    StringRef Name = TIIPred->getValueAsString("FunctionName");
    StringMap<const Record *>::const_iterator It = TIIPredicates.find(Name);
    if (It == TIIPredicates.end()) {
      TIIPredicates[Name] = TIIPred;
      continue;
    }

    PrintError(TIIPred->getLoc(),
               "TIIPredicate " + Name + " is multiply defined.");
    PrintNote(It->second->getLoc(),
              " Previous definition of " + Name + " was here.");
    PrintFatalError(TIIPred->getLoc(),
                    "Found conflicting definitions of TIIPredicate.");
  }
}

void CodeGenSchedModels::collectRetireControlUnits() {
  RecVec Units = Records.getAllDerivedDefinitions("RetireControlUnit");

  for (Record *RCU : Units) {
    CodeGenProcModel &PM = getProcModel(RCU->getValueAsDef("SchedModel"));
    if (PM.RetireControlUnit) {
      PrintError(RCU->getLoc(),
                 "Expected a single RetireControlUnit definition");
      PrintNote(PM.RetireControlUnit->getLoc(),
                "Previous definition of RetireControlUnit was here");
    }
    PM.RetireControlUnit = RCU;
  }
}

void CodeGenSchedModels::collectLoadStoreQueueInfo() {
  RecVec Queues = Records.getAllDerivedDefinitions("MemoryQueue");

  for (Record *Queue : Queues) {
    CodeGenProcModel &PM = getProcModel(Queue->getValueAsDef("SchedModel"));
    if (Queue->isSubClassOf("LoadQueue")) {
      if (PM.LoadQueue) {
        PrintError(Queue->getLoc(),
                   "Expected a single LoadQueue definition");
        PrintNote(PM.LoadQueue->getLoc(),
                  "Previous definition of LoadQueue was here");
      }

      PM.LoadQueue = Queue;
    }

    if (Queue->isSubClassOf("StoreQueue")) {
      if (PM.StoreQueue) {
        PrintError(Queue->getLoc(),
                   "Expected a single StoreQueue definition");
        PrintNote(PM.StoreQueue->getLoc(),
                  "Previous definition of StoreQueue was here");
      }

      PM.StoreQueue = Queue;
    }
  }
}

/// Collect optional processor information.
void CodeGenSchedModels::collectOptionalProcessorInfo() {
  // Find register file definitions for each processor.
  collectRegisterFiles();

  // Collect processor RetireControlUnit descriptors if available.
  collectRetireControlUnits();

  // Collect information about load/store queues.
  collectLoadStoreQueueInfo();

  checkCompleteness();
}

/// Gather all processor models.
void CodeGenSchedModels::collectProcModels() {
  RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
  llvm::sort(ProcRecords, LessRecordFieldName());

  // Reserve space because we can. Reallocation would be ok.
  ProcModels.reserve(ProcRecords.size()+1);

  // Use idx=0 for NoModel/NoItineraries.
  Record *NoModelDef = Records.getDef("NoSchedModel");
  Record *NoItinsDef = Records.getDef("NoItineraries");
  ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
  ProcModelMap[NoModelDef] = 0;

  // For each processor, find a unique machine model.
  LLVM_DEBUG(dbgs() << "+++ PROCESSOR MODELs (addProcModel) +++\n");
  for (Record *ProcRecord : ProcRecords)
    addProcModel(ProcRecord);
}

/// Get a unique processor model based on the defined MachineModel and
/// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
  Record *ModelKey = getModelOrItinDef(ProcDef);
  if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
    return;

  std::string Name = ModelKey->getName();
  if (ModelKey->isSubClassOf("SchedMachineModel")) {
    Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
    ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
  }
  else {
    // An itinerary is defined without a machine model. Infer a new model.
    if (!ModelKey->getValueAsListOfDefs("IID").empty())
      Name = Name + "Model";
    ProcModels.emplace_back(ProcModels.size(), Name,
                            ProcDef->getValueAsDef("SchedModel"), ModelKey);
  }
  LLVM_DEBUG(ProcModels.back().dump());
}

// Recursively find all reachable SchedReadWrite records.
static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
                        SmallPtrSet<Record*, 16> &RWSet) {
  if (!RWSet.insert(RWDef).second)
    return;
  RWDefs.push_back(RWDef);
  // Reads don't currently have sequence records, but they can be added later.
  if (RWDef->isSubClassOf("WriteSequence")) {
    RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
    for (Record *WSRec : Seq)
      scanSchedRW(WSRec, RWDefs, RWSet);
  }
  else if (RWDef->isSubClassOf("SchedVariant")) {
    // Visit each variant (guarded by a different predicate).
    RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
    for (Record *Variant : Vars) {
      // Visit each RW in the sequence selected by the current variant.
      RecVec Selected = Variant->getValueAsListOfDefs("Selected");
      for (Record *SelDef : Selected)
        scanSchedRW(SelDef, RWDefs, RWSet);
    }
  }
}

// Collect and sort all SchedReadWrites reachable via tablegen records.
// More may be inferred later when inferring new SchedClasses from variants.
void CodeGenSchedModels::collectSchedRW() {
  // Reserve idx=0 for invalid writes/reads.
  SchedWrites.resize(1);
  SchedReads.resize(1);

  SmallPtrSet<Record*, 16> RWSet;

  // Find all SchedReadWrites referenced by instruction defs.
  RecVec SWDefs, SRDefs;
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    Record *SchedDef = Inst->TheDef;
    if (SchedDef->isValueUnset("SchedRW"))
      continue;
    RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
    for (Record *RW : RWs) {
      if (RW->isSubClassOf("SchedWrite"))
        scanSchedRW(RW, SWDefs, RWSet);
      else {
        assert(RW->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(RW, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by InstRW.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  for (Record *InstRWDef : InstRWDefs) {
    // For all OperandReadWrites.
    RecVec RWDefs = InstRWDef->getValueAsListOfDefs("OperandReadWrites");
    for (Record *RWDef : RWDefs) {
      if (RWDef->isSubClassOf("SchedWrite"))
        scanSchedRW(RWDef, SWDefs, RWSet);
      else {
        assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(RWDef, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by ItinRW.
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  for (Record *ItinRWDef : ItinRWDefs) {
    // For all OperandReadWrites.
    RecVec RWDefs = ItinRWDef->getValueAsListOfDefs("OperandReadWrites");
    for (Record *RWDef : RWDefs) {
      if (RWDef->isSubClassOf("SchedWrite"))
        scanSchedRW(RWDef, SWDefs, RWSet);
      else {
        assert(RWDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(RWDef, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted
  // for the loop below that initializes Alias vectors.
  RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
  llvm::sort(AliasDefs, LessRecord());
  for (Record *ADef : AliasDefs) {
    Record *MatchDef = ADef->getValueAsDef("MatchRW");
    Record *AliasDef = ADef->getValueAsDef("AliasRW");
    if (MatchDef->isSubClassOf("SchedWrite")) {
      if (!AliasDef->isSubClassOf("SchedWrite"))
        PrintFatalError(ADef->getLoc(), "SchedWrite Alias must be SchedWrite");
      scanSchedRW(AliasDef, SWDefs, RWSet);
    }
    else {
      assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
      if (!AliasDef->isSubClassOf("SchedRead"))
        PrintFatalError(ADef->getLoc(), "SchedRead Alias must be SchedRead");
      scanSchedRW(AliasDef, SRDefs, RWSet);
    }
  }
  // Sort and add the SchedReadWrites directly referenced by instructions or
  // itinerary resources. Index reads and writes in separate domains.
  llvm::sort(SWDefs, LessRecord());
  for (Record *SWDef : SWDefs) {
    assert(!getSchedRWIdx(SWDef, /*IsRead=*/false) && "duplicate SchedWrite");
    SchedWrites.emplace_back(SchedWrites.size(), SWDef);
  }
  llvm::sort(SRDefs, LessRecord());
  for (Record *SRDef : SRDefs) {
    assert(!getSchedRWIdx(SRDef, /*IsRead=*/true) && "duplicate SchedRead");
    SchedReads.emplace_back(SchedReads.size(), SRDef);
  }
  // Initialize WriteSequence vectors.
  for (CodeGenSchedRW &CGRW : SchedWrites) {
    if (!CGRW.IsSequence)
      continue;
    findRWs(CGRW.TheDef->getValueAsListOfDefs("Writes"), CGRW.Sequence,
            /*IsRead=*/false);
  }
  // Initialize Aliases vectors.
  for (Record *ADef : AliasDefs) {
    Record *AliasDef = ADef->getValueAsDef("AliasRW");
    getSchedRW(AliasDef).IsAlias = true;
    Record *MatchDef = ADef->getValueAsDef("MatchRW");
    CodeGenSchedRW &RW = getSchedRW(MatchDef);
    if (RW.IsAlias)
      PrintFatalError(ADef->getLoc(), "Cannot Alias an Alias");
    RW.Aliases.push_back(ADef);
  }
  LLVM_DEBUG(
      dbgs() << "\n+++ SCHED READS and WRITES (collectSchedRW) +++\n";
      for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
        dbgs() << WIdx << ": ";
        SchedWrites[WIdx].dump();
        dbgs() << '\n';
      } for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd;
             ++RIdx) {
        dbgs() << RIdx << ": ";
        SchedReads[RIdx].dump();
        dbgs() << '\n';
      } RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
      for (Record *RWDef
           : RWDefs) {
        if (!getSchedRWIdx(RWDef, RWDef->isSubClassOf("SchedRead"))) {
          StringRef Name = RWDef->getName();
          if (Name != "NoWrite" && Name != "ReadDefault")
            dbgs() << "Unused SchedReadWrite " << Name << '\n';
        }
      });
}

/// Compute a SchedWrite or SchedRead name from a sequence of SchedRW indices.
std::string CodeGenSchedModels::genRWName(ArrayRef<unsigned> Seq, bool IsRead) {
  std::string Name("(");
  for (auto I = Seq.begin(), E = Seq.end(); I != E; ++I) {
    if (I != Seq.begin())
      Name += '_';
    Name += getSchedRW(*I, IsRead).Name;
  }
  Name += ')';
  return Name;
}

unsigned CodeGenSchedModels::getSchedRWIdx(const Record *Def,
                                           bool IsRead) const {
  const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  const auto I = find_if(
      RWVec, [Def](const CodeGenSchedRW &RW) { return RW.TheDef == Def; });
  return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
}

bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
  for (const CodeGenSchedRW &Read : SchedReads) {
    Record *ReadDef = Read.TheDef;
    if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
      continue;

    RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
    if (is_contained(ValidWrites, WriteDef)) {
      return true;
    }
  }
  return false;
}

static void splitSchedReadWrites(const RecVec &RWDefs,
                                 RecVec &WriteDefs, RecVec &ReadDefs) {
  for (Record *RWDef : RWDefs) {
    if (RWDef->isSubClassOf("SchedWrite"))
      WriteDefs.push_back(RWDef);
    else {
      assert(RWDef->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
      ReadDefs.push_back(RWDef);
    }
  }
}

// Split the SchedReadWrites defs and call findRWs for each list.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
                                 IdxVec &Writes, IdxVec &Reads) const {
  RecVec WriteDefs;
  RecVec ReadDefs;
  splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
  findRWs(WriteDefs, Writes, false);
  findRWs(ReadDefs, Reads, true);
}

// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
                                 bool IsRead) const {
  for (Record *RWDef : RWDefs) {
    unsigned Idx = getSchedRWIdx(RWDef, IsRead);
    assert(Idx && "failed to collect SchedReadWrite");
    RWs.push_back(Idx);
  }
}

void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
                                          bool IsRead) const {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (!SchedRW.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
      SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
  for (int i = 0; i < Repeat; ++i) {
    for (unsigned I : SchedRW.Sequence) {
      expandRWSequence(I, RWSeq, IsRead);
    }
  }
}

// Expand a SchedWrite as a sequence following any aliases that coincide with
// the given processor model.
void CodeGenSchedModels::expandRWSeqForProc(
    unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
    const CodeGenProcModel &ProcModel) const {

  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
  Record *AliasDef = nullptr;
  for (const Record *Rec : SchedWrite.Aliases) {
    const CodeGenSchedRW &AliasRW = getSchedRW(Rec->getValueAsDef("AliasRW"));
    if (Rec->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = Rec->getValueAsDef("SchedModel");
      if (&getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      ". Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef) {
    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
                       RWSeq, IsRead, ProcModel);
    return;
  }
  if (!SchedWrite.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
      SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
  for (int I = 0, E = Repeat; I < E; ++I) {
    for (unsigned Idx : SchedWrite.Sequence) {
      expandRWSeqForProc(Idx, RWSeq, IsRead, ProcModel);
    }
  }
}

// Find the existing SchedWrite that models this sequence of writes.
unsigned CodeGenSchedModels::findRWForSequence(ArrayRef<unsigned> Seq,
                                               bool IsRead) {
  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;

  auto I = find_if(RWVec, [Seq](CodeGenSchedRW &RW) {
    return makeArrayRef(RW.Sequence) == Seq;
  });
  // Index zero reserved for invalid RW.
  return I == RWVec.end() ? 0 : std::distance(RWVec.begin(), I);
}

/// Add this ReadWrite if it doesn't already exist.
unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
                                            bool IsRead) {
  assert(!Seq.empty() && "cannot insert empty sequence");
  if (Seq.size() == 1)
    return Seq.back();

  unsigned Idx = findRWForSequence(Seq, IsRead);
  if (Idx)
    return Idx;

  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  unsigned RWIdx = RWVec.size();
  CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
  RWVec.push_back(SchedRW);
  return RWIdx;
}

/// Visit all the instruction definitions for this target to gather and
/// enumerate the itinerary classes. These are the explicitly specified
/// SchedClasses. More SchedClasses may be inferred.
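/// For illustration (hypothetical SchedRW names): an instruction with no
/// itinerary class and SchedRW = [WriteIMul, ReadIMul] lands in a class named
/// "WriteIMul_ReadIMul" (see createSchedClassName below).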
void CodeGenSchedModels::collectSchedClasses() {

  // NoItinerary is always the first class at Idx=0.
  assert(SchedClasses.empty() && "Expected empty sched class");
  SchedClasses.emplace_back(0, "NoInstrModel",
                            Records.getDef("NoItinerary"));
  SchedClasses.back().ProcIndices.push_back(0);

  // Create a SchedClass for each unique combination of itinerary class and
  // SchedRW list.
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
    IdxVec Writes, Reads;
    if (!Inst->TheDef->isValueUnset("SchedRW"))
      findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);

    // ProcIdx == 0 indicates the class applies to all processors.
    unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, /*ProcIndices*/{0});
    InstrClassMap[Inst->TheDef] = SCIdx;
  }
  // Create classes for InstRW defs.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  llvm::sort(InstRWDefs, LessRecord());
  LLVM_DEBUG(dbgs() << "\n+++ SCHED CLASSES (createInstRWClass) +++\n");
  for (Record *RWDef : InstRWDefs)
    createInstRWClass(RWDef);

  NumInstrSchedClasses = SchedClasses.size();

  bool EnableDump = false;
  LLVM_DEBUG(EnableDump = true);
  if (!EnableDump)
    return;

  LLVM_DEBUG(
      dbgs()
      << "\n+++ ITINERARIES and/or MACHINE MODELS (collectSchedClasses) +++\n");
  for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) {
    StringRef InstName = Inst->TheDef->getName();
    unsigned SCIdx = getSchedClassIdx(*Inst);
    if (!SCIdx) {
      LLVM_DEBUG({
        if (!Inst->hasNoSchedulingInfo)
          dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
      });
      continue;
    }
    CodeGenSchedClass &SC = getSchedClass(SCIdx);
    if (SC.ProcIndices[0] != 0)
      PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
                      "must not be subtarget specific.");

    IdxVec ProcIndices;
    if (SC.ItinClassDef->getName() != "NoItinerary") {
      ProcIndices.push_back(0);
      dbgs() << "Itinerary for " << InstName << ": "
             << SC.ItinClassDef->getName() << '\n';
    }
    if (!SC.Writes.empty()) {
      ProcIndices.push_back(0);
      LLVM_DEBUG({
        dbgs() << "SchedRW machine model for " << InstName;
        for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE;
             ++WI)
          dbgs() << " " << SchedWrites[*WI].Name;
        for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
          dbgs() << " " << SchedReads[*RI].Name;
        dbgs() << '\n';
      });
    }
    const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
    for (Record *RWDef : RWDefs) {
      const CodeGenProcModel &ProcModel =
          getProcModel(RWDef->getValueAsDef("SchedModel"));
      ProcIndices.push_back(ProcModel.Index);
      LLVM_DEBUG(dbgs() << "InstRW on " << ProcModel.ModelName << " for "
                        << InstName);
      IdxVec Writes;
      IdxVec Reads;
      findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"),
              Writes, Reads);
      LLVM_DEBUG({
        for (unsigned WIdx : Writes)
          dbgs() << " " << SchedWrites[WIdx].Name;
        for (unsigned RIdx : Reads)
          dbgs() << " " << SchedReads[RIdx].Name;
        dbgs() << '\n';
      });
    }
    // If ProcIndices contains zero, the class applies to all processors.
    LLVM_DEBUG({
      if (!std::count(ProcIndices.begin(), ProcIndices.end(), 0)) {
        for (const CodeGenProcModel &PM : ProcModels) {
          if (!std::count(ProcIndices.begin(), ProcIndices.end(), PM.Index))
            dbgs() << "No machine model for " << Inst->TheDef->getName()
                   << " on processor " << PM.ModelName << '\n';
        }
      }
    });
  }
}

// Get the SchedClass index for an instruction.
unsigned
CodeGenSchedModels::getSchedClassIdx(const CodeGenInstruction &Inst) const {
  return InstrClassMap.lookup(Inst.TheDef);
}

std::string
CodeGenSchedModels::createSchedClassName(Record *ItinClassDef,
                                         ArrayRef<unsigned> OperWrites,
                                         ArrayRef<unsigned> OperReads) {

  std::string Name;
  if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
    Name = ItinClassDef->getName();
  for (unsigned Idx : OperWrites) {
    if (!Name.empty())
      Name += '_';
    Name += SchedWrites[Idx].Name;
  }
  for (unsigned Idx : OperReads) {
    Name += '_';
    Name += SchedReads[Idx].Name;
  }
  return Name;
}

std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {

  std::string Name;
  for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
    if (I != InstDefs.begin())
      Name += '_';
    Name += (*I)->getName();
  }
  return Name;
}

/// Add an inferred sched class from an itinerary class and per-operand list of
/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
/// processors that may utilize this class.
unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
                                           ArrayRef<unsigned> OperWrites,
                                           ArrayRef<unsigned> OperReads,
                                           ArrayRef<unsigned> ProcIndices) {
  assert(!ProcIndices.empty() && "expect at least one ProcIdx");

  auto IsKeyEqual = [=](const CodeGenSchedClass &SC) {
    return SC.isKeyEqual(ItinClassDef, OperWrites, OperReads);
  };

  auto I = find_if(make_range(schedClassBegin(), schedClassEnd()), IsKeyEqual);
  unsigned Idx = I == schedClassEnd() ? 0 : std::distance(schedClassBegin(), I);
  if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
    IdxVec PI;
    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
                   SchedClasses[Idx].ProcIndices.end(),
                   ProcIndices.begin(), ProcIndices.end(),
                   std::back_inserter(PI));
    SchedClasses[Idx].ProcIndices = std::move(PI);
    return Idx;
  }
  Idx = SchedClasses.size();
  SchedClasses.emplace_back(Idx,
                            createSchedClassName(ItinClassDef, OperWrites,
                                                 OperReads),
                            ItinClassDef);
  CodeGenSchedClass &SC = SchedClasses.back();
  SC.Writes = OperWrites;
  SC.Reads = OperReads;
  SC.ProcIndices = ProcIndices;

  return Idx;
}

// Create classes for each set of opcodes that are in the same InstReadWrite
// definition across all processors.
void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
  // intersects with an existing class via a previous InstRWDef. Instrs that do
  // not intersect with an existing class refer back to their former class as
  // determined from ItinDef or SchedRW.
  SmallMapVector<unsigned, SmallVector<Record *, 8>, 4> ClassInstrs;
  // Sort Instrs into sets.
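  // For illustration (hypothetical pattern): if InstRWDef's Instrs dag is
  // (instregex "ADD.*"), Sets.expand returns every matching opcode, and each
  // opcode is then grouped under the SchedClass it currently maps to.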
  const RecVec *InstDefs = Sets.expand(InstRWDef);
  if (InstDefs->empty())
    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");

  for (Record *InstDef : *InstDefs) {
    InstClassMapTy::const_iterator Pos = InstrClassMap.find(InstDef);
    if (Pos == InstrClassMap.end())
      PrintFatalError(InstDef->getLoc(), "No sched class for instruction.");
    unsigned SCIdx = Pos->second;
    ClassInstrs[SCIdx].push_back(InstDef);
  }
  // For each set of Instrs, create a new class if necessary, and map or remap
  // the Instrs to it.
  for (auto &Entry : ClassInstrs) {
    unsigned OldSCIdx = Entry.first;
    ArrayRef<Record*> InstDefs = Entry.second;
    // If all instrs in the current class are accounted for, then leave
    // them mapped to their old class.
    if (OldSCIdx) {
      const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
      if (!RWDefs.empty()) {
        const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
        unsigned OrigNumInstrs =
            count_if(*OrigInstDefs, [&](Record *OIDef) {
              return InstrClassMap[OIDef] == OldSCIdx;
            });
        if (OrigNumInstrs == InstDefs.size()) {
          assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
                 "expected a generic SchedClass");
          Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
          // Make sure we didn't already have an InstRW containing this
          // instruction on this model.
          for (Record *RWD : RWDefs) {
            if (RWD->getValueAsDef("SchedModel") == RWModelDef &&
                RWModelDef->getValueAsBit("FullInstRWOverlapCheck")) {
              for (Record *Inst : InstDefs) {
                PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
                                Inst->getName() + " also matches " +
                                RWD->getValue("Instrs")->getValue()->getAsString());
              }
            }
          }
          LLVM_DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
                            << SchedClasses[OldSCIdx].Name << " on "
                            << RWModelDef->getName() << "\n");
          SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
          continue;
        }
      }
    }
    unsigned SCIdx = SchedClasses.size();
    SchedClasses.emplace_back(SCIdx, createSchedClassName(InstDefs), nullptr);
    CodeGenSchedClass &SC = SchedClasses.back();
    LLVM_DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
                      << InstRWDef->getValueAsDef("SchedModel")->getName()
                      << "\n");

    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
    SC.Writes = SchedClasses[OldSCIdx].Writes;
    SC.Reads = SchedClasses[OldSCIdx].Reads;
    SC.ProcIndices.push_back(0);
    // If we had an old class, copy its InstRWs to this new class.
    if (OldSCIdx) {
      Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
      for (Record *OldRWDef : SchedClasses[OldSCIdx].InstRWs) {
        if (OldRWDef->getValueAsDef("SchedModel") == RWModelDef) {
          for (Record *InstDef : InstDefs) {
            PrintFatalError(OldRWDef->getLoc(), "Overlapping InstRW def " +
                            InstDef->getName() + " also matches " +
                            OldRWDef->getValue("Instrs")->getValue()->getAsString());
          }
        }
        assert(OldRWDef != InstRWDef &&
               "SchedClass has duplicate InstRW def");
        SC.InstRWs.push_back(OldRWDef);
      }
    }
    // Map each Instr to this new class.
    for (Record *InstDef : InstDefs)
      InstrClassMap[InstDef] = SCIdx;
    SC.InstRWs.push_back(InstRWDef);
  }
}

// True if collectProcItins found anything.
bool CodeGenSchedModels::hasItineraries() const {
  for (const CodeGenProcModel &PM : make_range(procModelBegin(),procModelEnd()))
    if (PM.hasItineraries())
      return true;
  return false;
}

// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
  LLVM_DEBUG(dbgs() << "\n+++ PROBLEM ITINERARIES (collectProcItins) +++\n");
  for (CodeGenProcModel &ProcModel : ProcModels) {
    if (!ProcModel.hasItineraries())
      continue;

    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
    assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");

    // Populate ItinDefList with Itinerary records.
    ProcModel.ItinDefList.resize(NumInstrSchedClasses);

    // Insert each itinerary data record in the correct position within
    // the processor model's ItinDefList.
    for (Record *ItinData : ItinRecords) {
      const Record *ItinDef = ItinData->getValueAsDef("TheClass");
      bool FoundClass = false;

      for (const CodeGenSchedClass &SC :
           make_range(schedClassBegin(), schedClassEnd())) {
        // Multiple SchedClasses may share an itinerary. Update all of them.
        if (SC.ItinClassDef == ItinDef) {
          ProcModel.ItinDefList[SC.Index] = ItinData;
          FoundClass = true;
        }
      }
      if (!FoundClass) {
        LLVM_DEBUG(dbgs() << ProcModel.ItinsDef->getName()
                          << " missing class for itinerary "
                          << ItinDef->getName() << '\n');
      }
    }
    // Check for missing itinerary entries.
    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
    LLVM_DEBUG(
        for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
          if (!ProcModel.ItinDefList[i])
            dbgs() << ProcModel.ItinsDef->getName()
                   << " missing itinerary for class " << SchedClasses[i].Name
                   << '\n';
        });
  }
}

// Gather the read/write types for each itinerary class.
void CodeGenSchedModels::collectProcItinRW() {
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  llvm::sort(ItinRWDefs, LessRecord());
  for (Record *RWDef : ItinRWDefs) {
    if (!RWDef->getValueInit("SchedModel")->isComplete())
      PrintFatalError(RWDef->getLoc(), "SchedModel is undefined");
    Record *ModelDef = RWDef->getValueAsDef("SchedModel");
    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
    if (I == ProcModelMap.end()) {
      PrintFatalError(RWDef->getLoc(), "Undefined SchedMachineModel "
                      + ModelDef->getName());
    }
    ProcModels[I->second].ItinRWDefs.push_back(RWDef);
  }
}

// Gather the unsupported features for processor models.
void CodeGenSchedModels::collectProcUnsupportedFeatures() {
  for (CodeGenProcModel &ProcModel : ProcModels) {
    for (Record *Pred : ProcModel.ModelDef->getValueAsListOfDefs("UnsupportedFeatures")) {
      ProcModel.UnsupportedFeaturesDefs.push_back(Pred);
    }
  }
}

/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
  LLVM_DEBUG(
      dbgs() << "\n+++ INFERRING SCHED CLASSES (inferSchedClasses) +++\n");
  LLVM_DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");

  // Visit all existing classes and newly created classes.
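  // Note: inferFromItinClass, inferFromInstRWs, and inferFromRW may append new
  // classes to SchedClasses, so the loop bound below is re-evaluated on every
  // iteration.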
  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
    assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");

    if (SchedClasses[Idx].ItinClassDef)
      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
    if (!SchedClasses[Idx].InstRWs.empty())
      inferFromInstRWs(Idx);
    if (!SchedClasses[Idx].Writes.empty()) {
      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
                  Idx, SchedClasses[Idx].ProcIndices);
    }
    assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
           "too many SchedVariants");
  }
}

/// Infer classes from per-processor itinerary resources.
void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
                                            unsigned FromClassIdx) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (const Record *Rec : PM.ItinRWDefs) {
      RecVec Matched = Rec->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      if (HasMatch)
        PrintFatalError(Rec->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      inferFromRW(Writes, Reads, FromClassIdx, PIdx);
    }
  }
}

/// Infer classes from per-processor InstReadWrite definitions.
void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
  for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
    assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstrRWs was mutated!");
    Record *Rec = SchedClasses[SCIdx].InstRWs[I];
    const RecVec *InstDefs = Sets.expand(Rec);
    RecIter II = InstDefs->begin(), IE = InstDefs->end();
    for (; II != IE; ++II) {
      if (InstrClassMap[*II] == SCIdx)
        break;
    }
    // If this class no longer has any instructions mapped to it, it has become
    // irrelevant.
    if (II == IE)
      continue;
    IdxVec Writes, Reads;
    findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
    unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
    inferFromRW(Writes, Reads, SCIdx, PIdx); // May mutate SchedClasses.
  }
}

namespace {

// Helper for substituteVariantOperand.
struct TransVariant {
  Record *VarOrSeqDef;  // Variant or sequence.
  unsigned RWIdx;       // Index of this variant or sequence's matched type.
  unsigned ProcIdx;     // Processor model index or zero for any.
  unsigned TransVecIdx; // Index into PredTransitions::TransVec.

  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
    VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
};

// Associate a predicate with the SchedReadWrite that it guards.
// RWIdx is the index of the read/write variant.
struct PredCheck {
  bool IsRead;
  unsigned RWIdx;
  Record *Predicate;

  PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
};

// A Predicate transition is a list of RW sequences guarded by a PredTerm.
struct PredTransition {
  // A predicate term is a conjunction of PredChecks.
  SmallVector<PredCheck, 4> PredTerm;
  SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
  SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
  SmallVector<unsigned, 4> ProcIndices;
};

// Encapsulate a set of partially constructed transitions.
// The results are built by repeated calls to substituteVariants.
class PredTransitions {
  CodeGenSchedModels &SchedModels;

public:
  std::vector<PredTransition> TransVec;

  PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}

  void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
                                bool IsRead, unsigned StartIdx);

  void substituteVariants(const PredTransition &Trans);

#ifndef NDEBUG
  void dump() const;
#endif

private:
  bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
  void getIntersectingVariants(
      const CodeGenSchedRW &SchedRW, unsigned TransIdx,
      std::vector<TransVariant> &IntersectingVariants);
  void pushVariant(const TransVariant &VInfo, bool IsRead);
};

} // end anonymous namespace

// Return true if this predicate is mutually exclusive with a PredTerm. This
// degenerates into checking if the predicate is mutually exclusive with any
// predicate in the Term's conjunction.
//
// All predicates associated with a given SchedRW are considered mutually
// exclusive. This should work even if the conditions expressed by the
// predicates are not exclusive because the predicates for a given SchedWrite
// are always checked in the order they are defined in the .td file. Later
// conditions implicitly negate any prior condition.
bool PredTransitions::mutuallyExclusive(Record *PredDef,
                                        ArrayRef<PredCheck> Term) {
  for (const PredCheck &PC: Term) {
    if (PC.Predicate == PredDef)
      return false;

    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(PC.RWIdx, PC.IsRead);
    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    if (any_of(Variants, [PredDef](const Record *R) {
          return R->getValueAsDef("Predicate") == PredDef;
        }))
      return true;
  }
  return false;
}

static bool hasAliasedVariants(const CodeGenSchedRW &RW,
                               CodeGenSchedModels &SchedModels) {
  if (RW.HasVariants)
    return true;

  for (Record *Alias : RW.Aliases) {
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW(Alias->getValueAsDef("AliasRW"));
    if (AliasRW.HasVariants)
      return true;
    if (AliasRW.IsSequence) {
      IdxVec ExpandedRWs;
      SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
      for (unsigned SI : ExpandedRWs) {
        if (hasAliasedVariants(SchedModels.getSchedRW(SI, AliasRW.IsRead),
                               SchedModels))
          return true;
      }
    }
  }
  return false;
}

static bool hasVariant(ArrayRef<PredTransition> Transitions,
                       CodeGenSchedModels &SchedModels) {
  for (const PredTransition &PTI : Transitions) {
    for (const SmallVectorImpl<unsigned> &WSI : PTI.WriteSequences)
      for (unsigned WI : WSI)
        if (hasAliasedVariants(SchedModels.getSchedWrite(WI), SchedModels))
          return true;

    for (const SmallVectorImpl<unsigned> &RSI : PTI.ReadSequences)
      for (unsigned RI : RSI)
        if (hasAliasedVariants(SchedModels.getSchedRead(RI), SchedModels))
          return true;
  }
  return false;
}

// Populate IntersectingVariants with any variants or aliased sequences of the
// given SchedRW whose processor indices and predicates are not mutually
// exclusive with the given transition.
void PredTransitions::getIntersectingVariants(
    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
    std::vector<TransVariant> &IntersectingVariants) {

  bool GenericRW = false;

  std::vector<TransVariant> Variants;
  if (SchedRW.HasVariants) {
    unsigned VarProcIdx = 0;
    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    // Push each variant. Assign TransVecIdx later.
    const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    for (Record *VarDef : VarDefs)
      Variants.emplace_back(VarDef, SchedRW.Index, VarProcIdx, 0);
    if (VarProcIdx == 0)
      GenericRW = true;
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    // If either the SchedAlias itself or the SchedReadWrite that it aliases
    // to is defined within a processor model, constrain all variants to
    // that processor.
    unsigned AliasProcIdx = 0;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    const CodeGenSchedRW &AliasRW =
        SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));

    if (AliasRW.HasVariants) {
      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
      for (Record *VD : VarDefs)
        Variants.emplace_back(VD, AliasRW.Index, AliasProcIdx, 0);
    }
    if (AliasRW.IsSequence)
      Variants.emplace_back(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0);
    if (AliasProcIdx == 0)
      GenericRW = true;
  }
  for (TransVariant &Variant : Variants) {
    // Don't expand variants if the processor models don't intersect.
    // A zero processor index means any processor.
    SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
    if (ProcIndices[0] && Variant.ProcIdx) {
      unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
                                Variant.ProcIdx);
      if (!Cnt)
        continue;
      if (Cnt > 1) {
        const CodeGenProcModel &PM =
            *(SchedModels.procModelBegin() + Variant.ProcIdx);
        PrintFatalError(Variant.VarOrSeqDef->getLoc(),
                        "Multiple variants defined for processor " +
                            PM.ModelName +
                            ". Ensure only one SchedAlias exists per RW.");
      }
    }
    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
      if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
        continue;
    }
    if (IntersectingVariants.empty()) {
      // The first variant builds on the existing transition.
      Variant.TransVecIdx = TransIdx;
      IntersectingVariants.push_back(Variant);
    }
    else {
      // Push another copy of the current transition for more variants.
      Variant.TransVecIdx = TransVec.size();
      IntersectingVariants.push_back(Variant);
      TransVec.push_back(TransVec[TransIdx]);
    }
  }
  if (GenericRW && IntersectingVariants.empty()) {
    PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
                    "a matching predicate on any processor");
  }
}

// Push the Reads/Writes selected by this variant onto the PredTransition
// specified by VInfo.
void PredTransitions::
pushVariant(const TransVariant &VInfo, bool IsRead) {
  PredTransition &Trans = TransVec[VInfo.TransVecIdx];

  // If this operand transition is reached through a processor-specific alias,
  // then the whole transition is specific to this processor.
  if (VInfo.ProcIdx != 0)
    Trans.ProcIndices.assign(1, VInfo.ProcIdx);

  IdxVec SelectedRWs;
  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
    Trans.PredTerm.emplace_back(IsRead, VInfo.RWIdx, PredDef);
    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
  }
  else {
    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
           "variant must be a SchedVariant or aliased WriteSequence");
    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
  }

  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);

  SmallVectorImpl<SmallVector<unsigned,4>> &RWSequences = IsRead
    ? Trans.ReadSequences : Trans.WriteSequences;
  if (SchedRW.IsVariadic) {
    unsigned OperIdx = RWSequences.size()-1;
    // Make N-1 copies of this transition's last sequence.
    RWSequences.insert(RWSequences.end(), SelectedRWs.size() - 1,
                       RWSequences[OperIdx]);
    // Push each of the N elements of the SelectedRWs onto a copy of the last
    // sequence (split the current operand into N operands).
    // Note that write sequences should be expanded within this loop--the entire
    // sequence belongs to a single operand.
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI, ++OperIdx) {
      IdxVec ExpandedRWs;
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
      RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
                                  ExpandedRWs.begin(), ExpandedRWs.end());
    }
    assert(OperIdx == RWSequences.size() && "missed a sequence");
  }
  else {
    // Push this transition's expanded sequence onto this transition's last
    // sequence (add to the current operand's sequence).
    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
    IdxVec ExpandedRWs;
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI) {
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
    }
    Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
  }
}

// RWSeq is a sequence of all Reads or all Writes for the next read or write
// operand. StartIdx is an index into TransVec where partial results
// start. RWSeq must be applied to all transitions between StartIdx and the end
// of TransVec.
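//
// For illustration: when a write in RWSeq resolves to a variant with two
// applicable SchedVar predicates, each partial transition is split in two; the
// first variant extends the transition in place and the second extends a copy
// appended to TransVec (see getIntersectingVariants and pushVariant).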
1568 void PredTransitions::substituteVariantOperand(
1569     const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {
1570 
1571   // Visit each original RW within the current sequence.
1572   for (SmallVectorImpl<unsigned>::const_iterator
1573          RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
1574     const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
1575     // Push this RW on all partial PredTransitions or distribute variants.
1576     // New PredTransitions may be pushed within this loop, which should not be
1577     // revisited (TransEnd must be loop invariant).
1578     for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
1579          TransIdx != TransEnd; ++TransIdx) {
1580       // In the common case, push RW onto the current operand's sequence.
1581       if (!hasAliasedVariants(SchedRW, SchedModels)) {
1582         if (IsRead)
1583           TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
1584         else
1585           TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
1586         continue;
1587       }
1588       // Distribute this partial PredTransition across intersecting variants.
1589       // This will push copies of TransVec[TransIdx] onto the back of TransVec.
1590       std::vector<TransVariant> IntersectingVariants;
1591       getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
1592       // Now expand each variant on top of its copy of the transition.
1593       for (std::vector<TransVariant>::const_iterator
1594              IVI = IntersectingVariants.begin(),
1595              IVE = IntersectingVariants.end();
1596            IVI != IVE; ++IVI) {
1597         pushVariant(*IVI, IsRead);
1598       }
1599     }
1600   }
1601 }
1602 
1603 // For each variant of a Read/Write in Trans, substitute the sequence of
1604 // Read/Writes guarded by the variant. This is exponential in the number of
1605 // variant Read/Writes, but in practice detection of mutually exclusive
1606 // predicates should result in linear growth in the total number of variants.
1607 //
1608 // This is one step in a breadth-first search of nested variants.
1609 void PredTransitions::substituteVariants(const PredTransition &Trans) {
1610   // Build up a set of partial results starting at the back of
1611   // PredTransitions. Remember the first new transition.
1612   unsigned StartIdx = TransVec.size();
1613   TransVec.emplace_back();
1614   TransVec.back().PredTerm = Trans.PredTerm;
1615   TransVec.back().ProcIndices = Trans.ProcIndices;
1616 
1617   // Visit each original write sequence.
1618   for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
1619          WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
1620        WSI != WSE; ++WSI) {
1621     // Push a new (empty) write sequence onto all partial Transitions.
1622     for (std::vector<PredTransition>::iterator I =
1623            TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
1624       I->WriteSequences.emplace_back();
1625     }
1626     substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
1627   }
1628   // Visit each original read sequence.
1629   for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator
1630          RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
1631        RSI != RSE; ++RSI) {
1632     // Push a new (empty) read sequence onto all partial Transitions.
1633     for (std::vector<PredTransition>::iterator I =
1634            TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
1635       I->ReadSequences.emplace_back();
1636     }
1637     substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
1638   }
1639 }
1640 
1641 // Create a new SchedClass for each variant found by inferFromRW.
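// Each surviving PredTransition becomes one CodeGenSchedTransition on the
// original class: its expanded read/write sequences are interned as SchedRW
// indices (creating new sequences if needed), a SchedClass is found or created
// for that signature, and the transition is guarded by the predicates
// collected while expanding the variants.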
1642 static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
1643                                  unsigned FromClassIdx,
1644                                  CodeGenSchedModels &SchedModels) {
1645   // For each PredTransition, create a new CodeGenSchedTransition, which usually
1646   // requires creating a new SchedClass.
1647   for (ArrayRef<PredTransition>::iterator
1648          I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
1649     IdxVec OperWritesVariant;
1650     transform(I->WriteSequences, std::back_inserter(OperWritesVariant),
1651               [&SchedModels](ArrayRef<unsigned> WS) {
1652                 return SchedModels.findOrInsertRW(WS, /*IsRead=*/false);
1653               });
1654     IdxVec OperReadsVariant;
1655     transform(I->ReadSequences, std::back_inserter(OperReadsVariant),
1656               [&SchedModels](ArrayRef<unsigned> RS) {
1657                 return SchedModels.findOrInsertRW(RS, /*IsRead=*/true);
1658               });
1659     CodeGenSchedTransition SCTrans;
1660     SCTrans.ToClassIdx =
1661         SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
1662                                   OperReadsVariant, I->ProcIndices);
1663     SCTrans.ProcIndices.assign(I->ProcIndices.begin(), I->ProcIndices.end());
1664     // The final PredTerm is the unique set of predicates guarding the transition.
1665     RecVec Preds;
1666     transform(I->PredTerm, std::back_inserter(Preds),
1667               [](const PredCheck &P) {
1668                 return P.Predicate;
1669               });
1670     Preds.erase(std::unique(Preds.begin(), Preds.end()), Preds.end());
1671     SCTrans.PredTerm = std::move(Preds);
1672     SchedModels.getSchedClass(FromClassIdx)
1673         .Transitions.push_back(std::move(SCTrans));
1674   }
1675 }
1676 
1677 // Create new SchedClasses for the given ReadWrite list. If any of the
1678 // ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
1679 // of the ReadWrite list, following Aliases if necessary.
1680 void CodeGenSchedModels::inferFromRW(ArrayRef<unsigned> OperWrites,
1681                                      ArrayRef<unsigned> OperReads,
1682                                      unsigned FromClassIdx,
1683                                      ArrayRef<unsigned> ProcIndices) {
1684   LLVM_DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices);
1685              dbgs() << ") ");
1686 
1687   // Create a seed transition with an empty PredTerm and the expanded sequences
1688   // of SchedWrites for the current SchedClass.
1689   std::vector<PredTransition> LastTransitions;
1690   LastTransitions.emplace_back();
1691   LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
1692                                             ProcIndices.end());
1693 
1694   for (unsigned WriteIdx : OperWrites) {
1695     IdxVec WriteSeq;
1696     expandRWSequence(WriteIdx, WriteSeq, /*IsRead=*/false);
1697     LastTransitions[0].WriteSequences.emplace_back();
1698     SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences.back();
1699     Seq.append(WriteSeq.begin(), WriteSeq.end());
1700     LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
1701   }
1702   LLVM_DEBUG(dbgs() << " Reads: ");
1703   for (unsigned ReadIdx : OperReads) {
1704     IdxVec ReadSeq;
1705     expandRWSequence(ReadIdx, ReadSeq, /*IsRead=*/true);
1706     LastTransitions[0].ReadSequences.emplace_back();
1707     SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences.back();
1708     Seq.append(ReadSeq.begin(), ReadSeq.end());
1709     LLVM_DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
1710   }
1711   LLVM_DEBUG(dbgs() << '\n');
1712 
1713   // Collect all PredTransitions for individual operands.
1714   // Iterate until no variant reads or writes remain.
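  // Each pass of substituteVariants rewrites every transition produced by the
  // previous pass, so nested variants (a variant whose selected writes are
  // themselves variants) are expanded breadth-first; the loop stops once
  // hasVariant finds only plain reads and writes.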
1715 while (hasVariant(LastTransitions, *this)) { 1716 PredTransitions Transitions(*this); 1717 for (const PredTransition &Trans : LastTransitions) 1718 Transitions.substituteVariants(Trans); 1719 LLVM_DEBUG(Transitions.dump()); 1720 LastTransitions.swap(Transitions.TransVec); 1721 } 1722 // If the first transition has no variants, nothing to do. 1723 if (LastTransitions[0].PredTerm.empty()) 1724 return; 1725 1726 // WARNING: We are about to mutate the SchedClasses vector. Do not refer to 1727 // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions. 1728 inferFromTransitions(LastTransitions, FromClassIdx, *this); 1729 } 1730 1731 // Check if any processor resource group contains all resource records in 1732 // SubUnits. 1733 bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) { 1734 for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) { 1735 if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup")) 1736 continue; 1737 RecVec SuperUnits = 1738 PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources"); 1739 RecIter RI = SubUnits.begin(), RE = SubUnits.end(); 1740 for ( ; RI != RE; ++RI) { 1741 if (!is_contained(SuperUnits, *RI)) { 1742 break; 1743 } 1744 } 1745 if (RI == RE) 1746 return true; 1747 } 1748 return false; 1749 } 1750 1751 // Verify that overlapping groups have a common supergroup. 1752 void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) { 1753 for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) { 1754 if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup")) 1755 continue; 1756 RecVec CheckUnits = 1757 PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources"); 1758 for (unsigned j = i+1; j < e; ++j) { 1759 if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup")) 1760 continue; 1761 RecVec OtherUnits = 1762 PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources"); 1763 if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(), 1764 OtherUnits.begin(), OtherUnits.end()) 1765 != CheckUnits.end()) { 1766 // CheckUnits and OtherUnits overlap 1767 OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(), 1768 CheckUnits.end()); 1769 if (!hasSuperGroup(OtherUnits, PM)) { 1770 PrintFatalError((PM.ProcResourceDefs[i])->getLoc(), 1771 "proc resource group overlaps with " 1772 + PM.ProcResourceDefs[j]->getName() 1773 + " but no supergroup contains both."); 1774 } 1775 } 1776 } 1777 } 1778 } 1779 1780 // Collect all the RegisterFile definitions available in this target. 1781 void CodeGenSchedModels::collectRegisterFiles() { 1782 RecVec RegisterFileDefs = Records.getAllDerivedDefinitions("RegisterFile"); 1783 1784 // RegisterFiles is the vector of CodeGenRegisterFile. 1785 for (Record *RF : RegisterFileDefs) { 1786 // For each register file definition, construct a CodeGenRegisterFile object 1787 // and add it to the appropriate scheduling model. 1788 CodeGenProcModel &PM = getProcModel(RF->getValueAsDef("SchedModel")); 1789 PM.RegisterFiles.emplace_back(CodeGenRegisterFile(RF->getName(),RF)); 1790 CodeGenRegisterFile &CGRF = PM.RegisterFiles.back(); 1791 CGRF.MaxMovesEliminatedPerCycle = 1792 RF->getValueAsInt("MaxMovesEliminatedPerCycle"); 1793 CGRF.AllowZeroMoveEliminationOnly = 1794 RF->getValueAsBit("AllowZeroMoveEliminationOnly"); 1795 1796 // Now set the number of physical registers as well as the cost of registers 1797 // in each register class. 
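    // A register file is declared in the target's scheduling model with its
    // size, the register classes it serves, and optional per-class costs and
    // move-elimination bits. For example (hypothetical record, not taken from
    // any in-tree target):
    //   def MyPRF : RegisterFile<60, [GPR32, GPR64], [1, 1]>;
    // Costs missing from RegCosts default to 1 below, and move elimination
    // stays disabled unless the corresponding AllowMoveElimination bit is set.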
1798     CGRF.NumPhysRegs = RF->getValueAsInt("NumPhysRegs");
1799     if (!CGRF.NumPhysRegs) {
1800       PrintFatalError(RF->getLoc(),
1801                       "Invalid RegisterFile with zero physical registers");
1802     }
1803 
1804     RecVec RegisterClasses = RF->getValueAsListOfDefs("RegClasses");
1805     std::vector<int64_t> RegisterCosts = RF->getValueAsListOfInts("RegCosts");
1806     ListInit *MoveElimInfo = RF->getValueAsListInit("AllowMoveElimination");
1807     for (unsigned I = 0, E = RegisterClasses.size(); I < E; ++I) {
1808       int Cost = RegisterCosts.size() > I ? RegisterCosts[I] : 1;
1809 
1810       bool AllowMoveElim = false;
1811       if (MoveElimInfo->size() > I) {
1812         BitInit *Val = cast<BitInit>(MoveElimInfo->getElement(I));
1813         AllowMoveElim = Val->getValue();
1814       }
1815 
1816       CGRF.Costs.emplace_back(RegisterClasses[I], Cost, AllowMoveElim);
1817     }
1818   }
1819 }
1820 
1821 // Collect and sort WriteRes, ReadAdvance, and ProcResources.
1822 void CodeGenSchedModels::collectProcResources() {
1823   ProcResourceDefs = Records.getAllDerivedDefinitions("ProcResourceUnits");
1824   ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
1825 
1826   // Add any subtarget-specific SchedReadWrites that are directly associated
1827   // with processor resources. Refer to the parent SchedClass's ProcIndices to
1828   // determine which processors they apply to.
1829   for (const CodeGenSchedClass &SC :
1830        make_range(schedClassBegin(), schedClassEnd())) {
1831     if (SC.ItinClassDef) {
1832       collectItinProcResources(SC.ItinClassDef);
1833       continue;
1834     }
1835 
1836     // This class may have a default ReadWrite list which can be overridden by
1837     // InstRW definitions.
1838     for (Record *RW : SC.InstRWs) {
1839       Record *RWModelDef = RW->getValueAsDef("SchedModel");
1840       unsigned PIdx = getProcModel(RWModelDef).Index;
1841       IdxVec Writes, Reads;
1842       findRWs(RW->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
1843       collectRWResources(Writes, Reads, PIdx);
1844     }
1845 
1846     collectRWResources(SC.Writes, SC.Reads, SC.ProcIndices);
1847   }
1848   // Add resources separately defined by each subtarget.
1849   RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
1850   for (Record *WR : WRDefs) {
1851     Record *ModelDef = WR->getValueAsDef("SchedModel");
1852     addWriteRes(WR, getProcModel(ModelDef).Index);
1853   }
1854   RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
1855   for (Record *SWR : SWRDefs) {
1856     Record *ModelDef = SWR->getValueAsDef("SchedModel");
1857     addWriteRes(SWR, getProcModel(ModelDef).Index);
1858   }
1859   RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
1860   for (Record *RA : RADefs) {
1861     Record *ModelDef = RA->getValueAsDef("SchedModel");
1862     addReadAdvance(RA, getProcModel(ModelDef).Index);
1863   }
1864   RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
1865   for (Record *SRA : SRADefs) {
1866     if (SRA->getValueInit("SchedModel")->isComplete()) {
1867       Record *ModelDef = SRA->getValueAsDef("SchedModel");
1868       addReadAdvance(SRA, getProcModel(ModelDef).Index);
1869     }
1870   }
1871   // Add ProcResGroups that are defined within this processor model, which may
1872   // not be directly referenced but may directly specify a buffer size.
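  // For example, a model may declare a group only to give two units a shared
  // buffer (hypothetical records, not taken from any in-tree target):
  //   let SchedModel = MyModel in
  //   def MyFPU : ProcResGroup<[MyFPU0, MyFPU1]> { let BufferSize = 48; }
  // Such a group is attached to its processor here even when no WriteRes
  // names it directly.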
1873 RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup"); 1874 for (Record *PRG : ProcResGroups) { 1875 if (!PRG->getValueInit("SchedModel")->isComplete()) 1876 continue; 1877 CodeGenProcModel &PM = getProcModel(PRG->getValueAsDef("SchedModel")); 1878 if (!is_contained(PM.ProcResourceDefs, PRG)) 1879 PM.ProcResourceDefs.push_back(PRG); 1880 } 1881 // Add ProcResourceUnits unconditionally. 1882 for (Record *PRU : Records.getAllDerivedDefinitions("ProcResourceUnits")) { 1883 if (!PRU->getValueInit("SchedModel")->isComplete()) 1884 continue; 1885 CodeGenProcModel &PM = getProcModel(PRU->getValueAsDef("SchedModel")); 1886 if (!is_contained(PM.ProcResourceDefs, PRU)) 1887 PM.ProcResourceDefs.push_back(PRU); 1888 } 1889 // Finalize each ProcModel by sorting the record arrays. 1890 for (CodeGenProcModel &PM : ProcModels) { 1891 llvm::sort(PM.WriteResDefs, LessRecord()); 1892 llvm::sort(PM.ReadAdvanceDefs, LessRecord()); 1893 llvm::sort(PM.ProcResourceDefs, LessRecord()); 1894 LLVM_DEBUG( 1895 PM.dump(); 1896 dbgs() << "WriteResDefs: "; for (RecIter RI = PM.WriteResDefs.begin(), 1897 RE = PM.WriteResDefs.end(); 1898 RI != RE; ++RI) { 1899 if ((*RI)->isSubClassOf("WriteRes")) 1900 dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " "; 1901 else 1902 dbgs() << (*RI)->getName() << " "; 1903 } dbgs() << "\nReadAdvanceDefs: "; 1904 for (RecIter RI = PM.ReadAdvanceDefs.begin(), 1905 RE = PM.ReadAdvanceDefs.end(); 1906 RI != RE; ++RI) { 1907 if ((*RI)->isSubClassOf("ReadAdvance")) 1908 dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " "; 1909 else 1910 dbgs() << (*RI)->getName() << " "; 1911 } dbgs() 1912 << "\nProcResourceDefs: "; 1913 for (RecIter RI = PM.ProcResourceDefs.begin(), 1914 RE = PM.ProcResourceDefs.end(); 1915 RI != RE; ++RI) { dbgs() << (*RI)->getName() << " "; } dbgs() 1916 << '\n'); 1917 verifyProcResourceGroups(PM); 1918 } 1919 1920 ProcResourceDefs.clear(); 1921 ProcResGroups.clear(); 1922 } 1923 1924 void CodeGenSchedModels::checkCompleteness() { 1925 bool Complete = true; 1926 bool HadCompleteModel = false; 1927 for (const CodeGenProcModel &ProcModel : procModels()) { 1928 const bool HasItineraries = ProcModel.hasItineraries(); 1929 if (!ProcModel.ModelDef->getValueAsBit("CompleteModel")) 1930 continue; 1931 for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { 1932 if (Inst->hasNoSchedulingInfo) 1933 continue; 1934 if (ProcModel.isUnsupported(*Inst)) 1935 continue; 1936 unsigned SCIdx = getSchedClassIdx(*Inst); 1937 if (!SCIdx) { 1938 if (Inst->TheDef->isValueUnset("SchedRW") && !HadCompleteModel) { 1939 PrintError("No schedule information for instruction '" 1940 + Inst->TheDef->getName() + "'"); 1941 Complete = false; 1942 } 1943 continue; 1944 } 1945 1946 const CodeGenSchedClass &SC = getSchedClass(SCIdx); 1947 if (!SC.Writes.empty()) 1948 continue; 1949 if (HasItineraries && SC.ItinClassDef != nullptr && 1950 SC.ItinClassDef->getName() != "NoItinerary") 1951 continue; 1952 1953 const RecVec &InstRWs = SC.InstRWs; 1954 auto I = find_if(InstRWs, [&ProcModel](const Record *R) { 1955 return R->getValueAsDef("SchedModel") == ProcModel.ModelDef; 1956 }); 1957 if (I == InstRWs.end()) { 1958 PrintError("'" + ProcModel.ModelName + "' lacks information for '" + 1959 Inst->TheDef->getName() + "'"); 1960 Complete = false; 1961 } 1962 } 1963 HadCompleteModel = true; 1964 } 1965 if (!Complete) { 1966 errs() << "\n\nIncomplete schedule models found.\n" 1967 << "- Consider setting 'CompleteModel = 0' while developing new models.\n" 1968 
<< "- Pseudo instructions can be marked with 'hasNoSchedulingInfo = 1'.\n" 1969 << "- Instructions should usually have Sched<[...]> as a superclass, " 1970 "you may temporarily use an empty list.\n" 1971 << "- Instructions related to unsupported features can be excluded with " 1972 "list<Predicate> UnsupportedFeatures = [HasA,..,HasY]; in the " 1973 "processor model.\n\n"; 1974 PrintFatalError("Incomplete schedule model"); 1975 } 1976 } 1977 1978 // Collect itinerary class resources for each processor. 1979 void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) { 1980 for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) { 1981 const CodeGenProcModel &PM = ProcModels[PIdx]; 1982 // For all ItinRW entries. 1983 bool HasMatch = false; 1984 for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end(); 1985 II != IE; ++II) { 1986 RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses"); 1987 if (!std::count(Matched.begin(), Matched.end(), ItinClassDef)) 1988 continue; 1989 if (HasMatch) 1990 PrintFatalError((*II)->getLoc(), "Duplicate itinerary class " 1991 + ItinClassDef->getName() 1992 + " in ItinResources for " + PM.ModelName); 1993 HasMatch = true; 1994 IdxVec Writes, Reads; 1995 findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); 1996 collectRWResources(Writes, Reads, PIdx); 1997 } 1998 } 1999 } 2000 2001 void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead, 2002 ArrayRef<unsigned> ProcIndices) { 2003 const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead); 2004 if (SchedRW.TheDef) { 2005 if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) { 2006 for (unsigned Idx : ProcIndices) 2007 addWriteRes(SchedRW.TheDef, Idx); 2008 } 2009 else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) { 2010 for (unsigned Idx : ProcIndices) 2011 addReadAdvance(SchedRW.TheDef, Idx); 2012 } 2013 } 2014 for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end(); 2015 AI != AE; ++AI) { 2016 IdxVec AliasProcIndices; 2017 if ((*AI)->getValueInit("SchedModel")->isComplete()) { 2018 AliasProcIndices.push_back( 2019 getProcModel((*AI)->getValueAsDef("SchedModel")).Index); 2020 } 2021 else 2022 AliasProcIndices = ProcIndices; 2023 const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW")); 2024 assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes"); 2025 2026 IdxVec ExpandedRWs; 2027 expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead); 2028 for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end(); 2029 SI != SE; ++SI) { 2030 collectRWResources(*SI, IsRead, AliasProcIndices); 2031 } 2032 } 2033 } 2034 2035 // Collect resources for a set of read/write types and processor indices. 2036 void CodeGenSchedModels::collectRWResources(ArrayRef<unsigned> Writes, 2037 ArrayRef<unsigned> Reads, 2038 ArrayRef<unsigned> ProcIndices) { 2039 for (unsigned Idx : Writes) 2040 collectRWResources(Idx, /*IsRead=*/false, ProcIndices); 2041 2042 for (unsigned Idx : Reads) 2043 collectRWResources(Idx, /*IsRead=*/true, ProcIndices); 2044 } 2045 2046 // Find the processor's resource units for this kind of resource. 
2047 Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind, 2048 const CodeGenProcModel &PM, 2049 ArrayRef<SMLoc> Loc) const { 2050 if (ProcResKind->isSubClassOf("ProcResourceUnits")) 2051 return ProcResKind; 2052 2053 Record *ProcUnitDef = nullptr; 2054 assert(!ProcResourceDefs.empty()); 2055 assert(!ProcResGroups.empty()); 2056 2057 for (Record *ProcResDef : ProcResourceDefs) { 2058 if (ProcResDef->getValueAsDef("Kind") == ProcResKind 2059 && ProcResDef->getValueAsDef("SchedModel") == PM.ModelDef) { 2060 if (ProcUnitDef) { 2061 PrintFatalError(Loc, 2062 "Multiple ProcessorResourceUnits associated with " 2063 + ProcResKind->getName()); 2064 } 2065 ProcUnitDef = ProcResDef; 2066 } 2067 } 2068 for (Record *ProcResGroup : ProcResGroups) { 2069 if (ProcResGroup == ProcResKind 2070 && ProcResGroup->getValueAsDef("SchedModel") == PM.ModelDef) { 2071 if (ProcUnitDef) { 2072 PrintFatalError(Loc, 2073 "Multiple ProcessorResourceUnits associated with " 2074 + ProcResKind->getName()); 2075 } 2076 ProcUnitDef = ProcResGroup; 2077 } 2078 } 2079 if (!ProcUnitDef) { 2080 PrintFatalError(Loc, 2081 "No ProcessorResources associated with " 2082 + ProcResKind->getName()); 2083 } 2084 return ProcUnitDef; 2085 } 2086 2087 // Iteratively add a resource and its super resources. 2088 void CodeGenSchedModels::addProcResource(Record *ProcResKind, 2089 CodeGenProcModel &PM, 2090 ArrayRef<SMLoc> Loc) { 2091 while (true) { 2092 Record *ProcResUnits = findProcResUnits(ProcResKind, PM, Loc); 2093 2094 // See if this ProcResource is already associated with this processor. 2095 if (is_contained(PM.ProcResourceDefs, ProcResUnits)) 2096 return; 2097 2098 PM.ProcResourceDefs.push_back(ProcResUnits); 2099 if (ProcResUnits->isSubClassOf("ProcResGroup")) 2100 return; 2101 2102 if (!ProcResUnits->getValueInit("Super")->isComplete()) 2103 return; 2104 2105 ProcResKind = ProcResUnits->getValueAsDef("Super"); 2106 } 2107 } 2108 2109 // Add resources for a SchedWrite to this processor if they don't exist. 2110 void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) { 2111 assert(PIdx && "don't add resources to an invalid Processor model"); 2112 2113 RecVec &WRDefs = ProcModels[PIdx].WriteResDefs; 2114 if (is_contained(WRDefs, ProcWriteResDef)) 2115 return; 2116 WRDefs.push_back(ProcWriteResDef); 2117 2118 // Visit ProcResourceKinds referenced by the newly discovered WriteRes. 2119 RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources"); 2120 for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end(); 2121 WritePRI != WritePRE; ++WritePRI) { 2122 addProcResource(*WritePRI, ProcModels[PIdx], ProcWriteResDef->getLoc()); 2123 } 2124 } 2125 2126 // Add resources for a ReadAdvance to this processor if they don't exist. 2127 void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef, 2128 unsigned PIdx) { 2129 RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs; 2130 if (is_contained(RADefs, ProcReadAdvanceDef)) 2131 return; 2132 RADefs.push_back(ProcReadAdvanceDef); 2133 } 2134 2135 unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const { 2136 RecIter PRPos = find(ProcResourceDefs, PRDef); 2137 if (PRPos == ProcResourceDefs.end()) 2138 PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in " 2139 "the ProcResources list for " + ModelName); 2140 // Idx=0 is reserved for invalid. 
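  // Resource indices are therefore 1-based: the first entry in
  // ProcResourceDefs is reported as index 1.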
2141 return 1 + (PRPos - ProcResourceDefs.begin()); 2142 } 2143 2144 bool CodeGenProcModel::isUnsupported(const CodeGenInstruction &Inst) const { 2145 for (const Record *TheDef : UnsupportedFeaturesDefs) { 2146 for (const Record *PredDef : Inst.TheDef->getValueAsListOfDefs("Predicates")) { 2147 if (TheDef->getName() == PredDef->getName()) 2148 return true; 2149 } 2150 } 2151 return false; 2152 } 2153 2154 #ifndef NDEBUG 2155 void CodeGenProcModel::dump() const { 2156 dbgs() << Index << ": " << ModelName << " " 2157 << (ModelDef ? ModelDef->getName() : "inferred") << " " 2158 << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n'; 2159 } 2160 2161 void CodeGenSchedRW::dump() const { 2162 dbgs() << Name << (IsVariadic ? " (V) " : " "); 2163 if (IsSequence) { 2164 dbgs() << "("; 2165 dumpIdxVec(Sequence); 2166 dbgs() << ")"; 2167 } 2168 } 2169 2170 void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const { 2171 dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n' 2172 << " Writes: "; 2173 for (unsigned i = 0, N = Writes.size(); i < N; ++i) { 2174 SchedModels->getSchedWrite(Writes[i]).dump(); 2175 if (i < N-1) { 2176 dbgs() << '\n'; 2177 dbgs().indent(10); 2178 } 2179 } 2180 dbgs() << "\n Reads: "; 2181 for (unsigned i = 0, N = Reads.size(); i < N; ++i) { 2182 SchedModels->getSchedRead(Reads[i]).dump(); 2183 if (i < N-1) { 2184 dbgs() << '\n'; 2185 dbgs().indent(10); 2186 } 2187 } 2188 dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n'; 2189 if (!Transitions.empty()) { 2190 dbgs() << "\n Transitions for Proc "; 2191 for (const CodeGenSchedTransition &Transition : Transitions) { 2192 dumpIdxVec(Transition.ProcIndices); 2193 } 2194 } 2195 } 2196 2197 void PredTransitions::dump() const { 2198 dbgs() << "Expanded Variants:\n"; 2199 for (std::vector<PredTransition>::const_iterator 2200 TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) { 2201 dbgs() << "{"; 2202 for (SmallVectorImpl<PredCheck>::const_iterator 2203 PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end(); 2204 PCI != PCE; ++PCI) { 2205 if (PCI != TI->PredTerm.begin()) 2206 dbgs() << ", "; 2207 dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name 2208 << ":" << PCI->Predicate->getName(); 2209 } 2210 dbgs() << "},\n => {"; 2211 for (SmallVectorImpl<SmallVector<unsigned,4>>::const_iterator 2212 WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end(); 2213 WSI != WSE; ++WSI) { 2214 dbgs() << "("; 2215 for (SmallVectorImpl<unsigned>::const_iterator 2216 WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) { 2217 if (WI != WSI->begin()) 2218 dbgs() << ", "; 2219 dbgs() << SchedModels.getSchedWrite(*WI).Name; 2220 } 2221 dbgs() << "),"; 2222 } 2223 dbgs() << "}\n"; 2224 } 2225 } 2226 #endif // NDEBUG 2227