//===- CodeGenSchedule.cpp - Scheduling MachineModels ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines structures to encapsulate the machine model as described in
// the target description.
//
//===----------------------------------------------------------------------===//

#include "CodeGenSchedule.h"
#include "CodeGenTarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
#include "llvm/TableGen/Error.h"

using namespace llvm;

#define DEBUG_TYPE "subtarget-emitter"

#ifndef NDEBUG
static void dumpIdxVec(const IdxVec &V) {
  for (unsigned i = 0, e = V.size(); i < e; ++i) {
    dbgs() << V[i] << ", ";
  }
}
static void dumpIdxVec(const SmallVectorImpl<unsigned> &V) {
  for (unsigned i = 0, e = V.size(); i < e; ++i) {
    dbgs() << V[i] << ", ";
  }
}
#endif

namespace {
// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp.
struct InstrsOp : public SetTheory::Operator {
  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc);
  }
};

// (instregex "OpcPat",...) Find all instructions matching an opcode pattern.
//
// TODO: Since this is a prefix match, perform a binary search over the
// instruction names using lower_bound. Note that the predefined instrs must be
// scanned linearly first. However, this is only safe if the regex pattern has
// no top-level bars. The DAG already has a list of patterns, so there's no
// reason to use top-level bars, but we need a way to verify they don't exist
// before implementing the optimization.
struct InstRegexOp : public SetTheory::Operator {
  const CodeGenTarget &Target;
  InstRegexOp(const CodeGenTarget &t): Target(t) {}

  void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts,
             ArrayRef<SMLoc> Loc) override {
    SmallVector<Regex, 4> RegexList;
    for (DagInit::const_arg_iterator
           AI = Expr->arg_begin(), AE = Expr->arg_end(); AI != AE; ++AI) {
      StringInit *SI = dyn_cast<StringInit>(*AI);
      if (!SI)
        PrintFatalError(Loc, "instregex requires pattern string: "
                        + Expr->getAsString());
      std::string pat = SI->getValue();
      // Implement a Python-style prefix match.
      if (pat[0] != '^') {
        pat.insert(0, "^(");
        pat.insert(pat.end(), ')');
      }
      RegexList.push_back(Regex(pat));
    }
    for (const CodeGenInstruction *Inst : Target.instructions()) {
      for (auto &R : RegexList) {
        if (R.match(Inst->TheDef->getName()))
          Elts.insert(Inst->TheDef);
      }
    }
  }
};
} // end anonymous namespace

/// CodeGenSchedModels ctor interprets machine model records and populates maps.
CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK,
                                       const CodeGenTarget &TGT):
  Records(RK), Target(TGT) {

  Sets.addFieldExpander("InstRW", "Instrs");

  // Allow Set evaluation to recognize the dags used in InstRW records:
  // (instrs Op1, Op2, ...)
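  // (instregex "Pat1", "Pat2", ...)
  //
  // For illustration only (the opcode pattern and write name below are
  // hypothetical), a target .td file typically uses these operators inside an
  // InstRW record, e.g.:
  //   def : InstRW<[WriteALU], (instregex "ADD.*rr", "SUB.*rr")>;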
  Sets.addOperator("instrs", llvm::make_unique<InstrsOp>());
  Sets.addOperator("instregex", llvm::make_unique<InstRegexOp>(Target));

  // Instantiate a CodeGenProcModel for each SchedMachineModel with the values
  // that are explicitly referenced in tablegen records. Resources associated
  // with each processor will be derived later. Populate ProcModelMap with the
  // CodeGenProcModel instances.
  collectProcModels();

  // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly
  // defined, and populate SchedReads and SchedWrites vectors. Implicit
  // SchedReadWrites that represent sequences derived from expanded variants
  // will be inferred later.
  collectSchedRW();

  // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly
  // required by an instruction definition, and populate SchedClassIdxMap. Set
  // NumItineraryClasses to the number of explicit itinerary classes referenced
  // by instructions. Set NumInstrSchedClasses to the number of itinerary
  // classes plus any classes implied by instructions that derive from class
  // Sched and provide a SchedRW list. This does not infer any new classes from
  // SchedVariant.
  collectSchedClasses();

  // Find instruction itineraries for each processor. Sort and populate
  // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). This requires
  // all itinerary classes to be discovered.
  collectProcItins();

  // Find ItinRW records for each processor and itinerary class.
  // (For per-operand resources mapped to itinerary classes).
  collectProcItinRW();

  // Infer new SchedClasses from SchedVariant.
  inferSchedClasses();

  // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and
  // ProcResourceDefs.
  collectProcResources();
}

/// Gather all processor models.
void CodeGenSchedModels::collectProcModels() {
  RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor");
  std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName());

  // Reserve space because we can. Reallocation would be ok.
  ProcModels.reserve(ProcRecords.size()+1);

  // Use idx=0 for NoModel/NoItineraries.
  Record *NoModelDef = Records.getDef("NoSchedModel");
  Record *NoItinsDef = Records.getDef("NoItineraries");
  ProcModels.emplace_back(0, "NoSchedModel", NoModelDef, NoItinsDef);
  ProcModelMap[NoModelDef] = 0;

  // For each processor, find a unique machine model.
  for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i)
    addProcModel(ProcRecords[i]);
}

/// Get a unique processor model based on the defined MachineModel and
/// ProcessorItineraries.
void CodeGenSchedModels::addProcModel(Record *ProcDef) {
  Record *ModelKey = getModelOrItinDef(ProcDef);
  if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second)
    return;

  std::string Name = ModelKey->getName();
  if (ModelKey->isSubClassOf("SchedMachineModel")) {
    Record *ItinsDef = ModelKey->getValueAsDef("Itineraries");
    ProcModels.emplace_back(ProcModels.size(), Name, ModelKey, ItinsDef);
  }
  else {
    // An itinerary is defined without a machine model. Infer a new model.
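    // For example (the name is chosen only for illustration), an itinerary
    // def named "G5Itineraries" with a non-empty IID list yields an inferred
    // model named "G5ItinerariesModel".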
    if (!ModelKey->getValueAsListOfDefs("IID").empty())
      Name = Name + "Model";
    ProcModels.emplace_back(ProcModels.size(), Name,
                            ProcDef->getValueAsDef("SchedModel"), ModelKey);
  }
  DEBUG(ProcModels.back().dump());
}

// Recursively find all reachable SchedReadWrite records.
static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
                        SmallPtrSet<Record*, 16> &RWSet) {
  if (!RWSet.insert(RWDef).second)
    return;
  RWDefs.push_back(RWDef);
  // Reads don't currently have sequence records, but they can be added later.
  if (RWDef->isSubClassOf("WriteSequence")) {
    RecVec Seq = RWDef->getValueAsListOfDefs("Writes");
    for (RecIter I = Seq.begin(), E = Seq.end(); I != E; ++I)
      scanSchedRW(*I, RWDefs, RWSet);
  }
  else if (RWDef->isSubClassOf("SchedVariant")) {
    // Visit each variant (guarded by a different predicate).
    RecVec Vars = RWDef->getValueAsListOfDefs("Variants");
    for (RecIter VI = Vars.begin(), VE = Vars.end(); VI != VE; ++VI) {
      // Visit each RW in the sequence selected by the current variant.
      RecVec Selected = (*VI)->getValueAsListOfDefs("Selected");
      for (RecIter I = Selected.begin(), E = Selected.end(); I != E; ++I)
        scanSchedRW(*I, RWDefs, RWSet);
    }
  }
}

// Collect and sort all SchedReadWrites reachable via tablegen records.
// More may be inferred later when inferring new SchedClasses from variants.
void CodeGenSchedModels::collectSchedRW() {
  // Reserve idx=0 for invalid writes/reads.
  SchedWrites.resize(1);
  SchedReads.resize(1);

  SmallPtrSet<Record*, 16> RWSet;

  // Find all SchedReadWrites referenced by instruction defs.
  RecVec SWDefs, SRDefs;
  for (const CodeGenInstruction *Inst : Target.instructions()) {
    Record *SchedDef = Inst->TheDef;
    if (SchedDef->isValueUnset("SchedRW"))
      continue;
    RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW");
    for (RecIter RWI = RWs.begin(), RWE = RWs.end(); RWI != RWE; ++RWI) {
      if ((*RWI)->isSubClassOf("SchedWrite"))
        scanSchedRW(*RWI, SWDefs, RWSet);
      else {
        assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(*RWI, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by InstRW.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) {
    // For all OperandReadWrites.
    RecVec RWDefs = (*OI)->getValueAsListOfDefs("OperandReadWrites");
    for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
         RWI != RWE; ++RWI) {
      if ((*RWI)->isSubClassOf("SchedWrite"))
        scanSchedRW(*RWI, SWDefs, RWSet);
      else {
        assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(*RWI, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by ItinRW.
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
    // For all OperandReadWrites.
    RecVec RWDefs = (*II)->getValueAsListOfDefs("OperandReadWrites");
    for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
         RWI != RWE; ++RWI) {
      if ((*RWI)->isSubClassOf("SchedWrite"))
        scanSchedRW(*RWI, SWDefs, RWSet);
      else {
        assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
        scanSchedRW(*RWI, SRDefs, RWSet);
      }
    }
  }
  // Find all ReadWrites referenced by SchedAlias.
  // AliasDefs needs to be sorted for the loop below that initializes Alias
  // vectors.
  RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias");
  std::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord());
  for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
    Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
    Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
    if (MatchDef->isSubClassOf("SchedWrite")) {
      if (!AliasDef->isSubClassOf("SchedWrite"))
        PrintFatalError((*AI)->getLoc(), "SchedWrite Alias must be SchedWrite");
      scanSchedRW(AliasDef, SWDefs, RWSet);
    }
    else {
      assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite");
      if (!AliasDef->isSubClassOf("SchedRead"))
        PrintFatalError((*AI)->getLoc(), "SchedRead Alias must be SchedRead");
      scanSchedRW(AliasDef, SRDefs, RWSet);
    }
  }
  // Sort and add the SchedReadWrites directly referenced by instructions or
  // itinerary resources. Index reads and writes in separate domains.
  std::sort(SWDefs.begin(), SWDefs.end(), LessRecord());
  for (RecIter SWI = SWDefs.begin(), SWE = SWDefs.end(); SWI != SWE; ++SWI) {
    assert(!getSchedRWIdx(*SWI, /*IsRead=*/false) && "duplicate SchedWrite");
    SchedWrites.emplace_back(SchedWrites.size(), *SWI);
  }
  std::sort(SRDefs.begin(), SRDefs.end(), LessRecord());
  for (RecIter SRI = SRDefs.begin(), SRE = SRDefs.end(); SRI != SRE; ++SRI) {
    assert(!getSchedRWIdx(*SRI, /*IsRead=*/true) && "duplicate SchedRead");
    SchedReads.emplace_back(SchedReads.size(), *SRI);
  }
  // Initialize WriteSequence vectors.
  for (std::vector<CodeGenSchedRW>::iterator WI = SchedWrites.begin(),
         WE = SchedWrites.end(); WI != WE; ++WI) {
    if (!WI->IsSequence)
      continue;
    findRWs(WI->TheDef->getValueAsListOfDefs("Writes"), WI->Sequence,
            /*IsRead=*/false);
  }
  // Initialize Aliases vectors.
  for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) {
    Record *AliasDef = (*AI)->getValueAsDef("AliasRW");
    getSchedRW(AliasDef).IsAlias = true;
    Record *MatchDef = (*AI)->getValueAsDef("MatchRW");
    CodeGenSchedRW &RW = getSchedRW(MatchDef);
    if (RW.IsAlias)
      PrintFatalError((*AI)->getLoc(), "Cannot Alias an Alias");
    RW.Aliases.push_back(*AI);
  }
  DEBUG(
    for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) {
      dbgs() << WIdx << ": ";
      SchedWrites[WIdx].dump();
      dbgs() << '\n';
    }
    for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) {
      dbgs() << RIdx << ": ";
      SchedReads[RIdx].dump();
      dbgs() << '\n';
    }
    RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite");
    for (RecIter RI = RWDefs.begin(), RE = RWDefs.end();
         RI != RE; ++RI) {
      if (!getSchedRWIdx(*RI, (*RI)->isSubClassOf("SchedRead"))) {
        const std::string &Name = (*RI)->getName();
        if (Name != "NoWrite" && Name != "ReadDefault")
          dbgs() << "Unused SchedReadWrite " << (*RI)->getName() << '\n';
      }
    });
}

/// Compute a SchedWrite name from a sequence of writes.
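/// For illustration (the write names below are hypothetical), the sequence
/// [WriteIMul, WriteALU] would produce the name "(WriteIMul_WriteALU)".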
std::string CodeGenSchedModels::genRWName(const IdxVec& Seq, bool IsRead) {
  std::string Name("(");
  for (IdxIter I = Seq.begin(), E = Seq.end(); I != E; ++I) {
    if (I != Seq.begin())
      Name += '_';
    Name += getSchedRW(*I, IsRead).Name;
  }
  Name += ')';
  return Name;
}

unsigned CodeGenSchedModels::getSchedRWIdx(Record *Def, bool IsRead,
                                           unsigned After) const {
  const std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;
  assert(After < RWVec.size() && "start position out of bounds");
  for (std::vector<CodeGenSchedRW>::const_iterator I = RWVec.begin() + After,
         E = RWVec.end(); I != E; ++I) {
    if (I->TheDef == Def)
      return I - RWVec.begin();
  }
  return 0;
}

bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const {
  for (unsigned i = 0, e = SchedReads.size(); i < e; ++i) {
    Record *ReadDef = SchedReads[i].TheDef;
    if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance"))
      continue;

    RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites");
    if (std::find(ValidWrites.begin(), ValidWrites.end(), WriteDef)
        != ValidWrites.end()) {
      return true;
    }
  }
  return false;
}

namespace llvm {
void splitSchedReadWrites(const RecVec &RWDefs,
                          RecVec &WriteDefs, RecVec &ReadDefs) {
  for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) {
    if ((*RWI)->isSubClassOf("SchedWrite"))
      WriteDefs.push_back(*RWI);
    else {
      assert((*RWI)->isSubClassOf("SchedRead") && "unknown SchedReadWrite");
      ReadDefs.push_back(*RWI);
    }
  }
}
} // namespace llvm

// Split the SchedReadWrites defs and call findRWs for each list.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs,
                                 IdxVec &Writes, IdxVec &Reads) const {
  RecVec WriteDefs;
  RecVec ReadDefs;
  splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs);
  findRWs(WriteDefs, Writes, false);
  findRWs(ReadDefs, Reads, true);
}

// Call getSchedRWIdx for all elements in a sequence of SchedRW defs.
void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs,
                                 bool IsRead) const {
  for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); RI != RE; ++RI) {
    unsigned Idx = getSchedRWIdx(*RI, IsRead);
    assert(Idx && "failed to collect SchedReadWrite");
    RWs.push_back(Idx);
  }
}

void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq,
                                          bool IsRead) const {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (!SchedRW.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
    SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1;
  for (int i = 0; i < Repeat; ++i) {
    for (IdxIter I = SchedRW.Sequence.begin(), E = SchedRW.Sequence.end();
         I != E; ++I) {
      expandRWSequence(*I, RWSeq, IsRead);
    }
  }
}

// Expand a SchedWrite as a sequence following any aliases that coincide with
// the given processor model.
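//
// For example (the names are hypothetical), given an alias such as
//   def : SchedAlias<WriteFDiv, ProcWriteFDivSeq>;  // within SomeProcModel
// expanding WriteFDiv for SomeProcModel follows the alias and then expands
// ProcWriteFDivSeq, while other processors keep WriteFDiv unchanged.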
void CodeGenSchedModels::expandRWSeqForProc(
  unsigned RWIdx, IdxVec &RWSeq, bool IsRead,
  const CodeGenProcModel &ProcModel) const {

  const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead);
  Record *AliasDef = nullptr;
  for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end();
       AI != AE; ++AI) {
    const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
      if (&getProcModel(ModelDef) != &ProcModel)
        continue;
    }
    if (AliasDef)
      PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases "
                      "defined for processor " + ProcModel.ModelName +
                      ". Ensure only one SchedAlias exists per RW.");
    AliasDef = AliasRW.TheDef;
  }
  if (AliasDef) {
    expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead),
                       RWSeq, IsRead, ProcModel);
    return;
  }
  if (!SchedWrite.IsSequence) {
    RWSeq.push_back(RWIdx);
    return;
  }
  int Repeat =
    SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1;
  for (int i = 0; i < Repeat; ++i) {
    for (IdxIter I = SchedWrite.Sequence.begin(), E = SchedWrite.Sequence.end();
         I != E; ++I) {
      expandRWSeqForProc(*I, RWSeq, IsRead, ProcModel);
    }
  }
}

// Find the existing SchedWrite that models this sequence of writes.
unsigned CodeGenSchedModels::findRWForSequence(const IdxVec &Seq,
                                               bool IsRead) {
  std::vector<CodeGenSchedRW> &RWVec = IsRead ? SchedReads : SchedWrites;

  for (std::vector<CodeGenSchedRW>::iterator I = RWVec.begin(), E = RWVec.end();
       I != E; ++I) {
    if (I->Sequence == Seq)
      return I - RWVec.begin();
  }
  // Index zero reserved for invalid RW.
  return 0;
}

/// Add this ReadWrite if it doesn't already exist.
unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef<unsigned> Seq,
                                            bool IsRead) {
  assert(!Seq.empty() && "cannot insert empty sequence");
  if (Seq.size() == 1)
    return Seq.back();

  unsigned Idx = findRWForSequence(Seq, IsRead);
  if (Idx)
    return Idx;

  unsigned RWIdx = IsRead ? SchedReads.size() : SchedWrites.size();
  CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead));
  if (IsRead)
    SchedReads.push_back(SchedRW);
  else
    SchedWrites.push_back(SchedRW);
  return RWIdx;
}

/// Visit all the instruction definitions for this target to gather and
/// enumerate the itinerary classes. These are the explicitly specified
/// SchedClasses. More SchedClasses may be inferred.
void CodeGenSchedModels::collectSchedClasses() {

  // NoItinerary is always the first class at Idx=0.
  SchedClasses.resize(1);
  SchedClasses.back().Index = 0;
  SchedClasses.back().Name = "NoInstrModel";
  SchedClasses.back().ItinClassDef = Records.getDef("NoItinerary");
  SchedClasses.back().ProcIndices.push_back(0);

  // Create a SchedClass for each unique combination of itinerary class and
  // SchedRW list.
  for (const CodeGenInstruction *Inst : Target.instructions()) {
    Record *ItinDef = Inst->TheDef->getValueAsDef("Itinerary");
    IdxVec Writes, Reads;
    if (!Inst->TheDef->isValueUnset("SchedRW"))
      findRWs(Inst->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads);

    // ProcIdx == 0 indicates the class applies to all processors.
    IdxVec ProcIndices(1, 0);

    unsigned SCIdx = addSchedClass(ItinDef, Writes, Reads, ProcIndices);
    InstrClassMap[Inst->TheDef] = SCIdx;
  }
  // Create classes for InstRW defs.
  RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW");
  std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord());
  for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI)
    createInstRWClass(*OI);

  NumInstrSchedClasses = SchedClasses.size();

  bool EnableDump = false;
  DEBUG(EnableDump = true);
  if (!EnableDump)
    return;

  for (const CodeGenInstruction *Inst : Target.instructions()) {
    std::string InstName = Inst->TheDef->getName();
    unsigned SCIdx = InstrClassMap.lookup(Inst->TheDef);
    if (!SCIdx) {
      dbgs() << "No machine model for " << Inst->TheDef->getName() << '\n';
      continue;
    }
    CodeGenSchedClass &SC = getSchedClass(SCIdx);
    if (SC.ProcIndices[0] != 0)
      PrintFatalError(Inst->TheDef->getLoc(), "Instruction's sched class "
                      "must not be subtarget specific.");

    IdxVec ProcIndices;
    if (SC.ItinClassDef->getName() != "NoItinerary") {
      ProcIndices.push_back(0);
      dbgs() << "Itinerary for " << InstName << ": "
             << SC.ItinClassDef->getName() << '\n';
    }
    if (!SC.Writes.empty()) {
      ProcIndices.push_back(0);
      dbgs() << "SchedRW machine model for " << InstName;
      for (IdxIter WI = SC.Writes.begin(), WE = SC.Writes.end(); WI != WE; ++WI)
        dbgs() << " " << SchedWrites[*WI].Name;
      for (IdxIter RI = SC.Reads.begin(), RE = SC.Reads.end(); RI != RE; ++RI)
        dbgs() << " " << SchedReads[*RI].Name;
      dbgs() << '\n';
    }
    const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs;
    for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end();
         RWI != RWE; ++RWI) {
      const CodeGenProcModel &ProcModel =
        getProcModel((*RWI)->getValueAsDef("SchedModel"));
      ProcIndices.push_back(ProcModel.Index);
      dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName;
      IdxVec Writes;
      IdxVec Reads;
      findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
              Writes, Reads);
      for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
        dbgs() << " " << SchedWrites[*WI].Name;
      for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
        dbgs() << " " << SchedReads[*RI].Name;
      dbgs() << '\n';
    }
    for (std::vector<CodeGenProcModel>::iterator PI = ProcModels.begin(),
           PE = ProcModels.end(); PI != PE; ++PI) {
      if (!std::count(ProcIndices.begin(), ProcIndices.end(), PI->Index))
        dbgs() << "No machine model for " << Inst->TheDef->getName()
               << " on processor " << PI->ModelName << '\n';
    }
  }
}

/// Find a SchedClass that has been inferred from a per-operand list of
/// SchedWrites and SchedReads.
unsigned CodeGenSchedModels::findSchedClassIdx(Record *ItinClassDef,
                                               const IdxVec &Writes,
                                               const IdxVec &Reads) const {
  for (SchedClassIter I = schedClassBegin(), E = schedClassEnd(); I != E; ++I) {
    if (I->ItinClassDef == ItinClassDef
        && I->Writes == Writes && I->Reads == Reads) {
      return I - schedClassBegin();
    }
  }
  return 0;
}

// Get the SchedClass index for an instruction.
unsigned CodeGenSchedModels::getSchedClassIdx(
  const CodeGenInstruction &Inst) const {

  return InstrClassMap.lookup(Inst.TheDef);
}

std::string CodeGenSchedModels::createSchedClassName(
  Record *ItinClassDef, const IdxVec &OperWrites, const IdxVec &OperReads) {

  std::string Name;
  if (ItinClassDef && ItinClassDef->getName() != "NoItinerary")
    Name = ItinClassDef->getName();
  for (IdxIter WI = OperWrites.begin(), WE = OperWrites.end(); WI != WE; ++WI) {
    if (!Name.empty())
      Name += '_';
    Name += SchedWrites[*WI].Name;
  }
  for (IdxIter RI = OperReads.begin(), RE = OperReads.end(); RI != RE; ++RI) {
    Name += '_';
    Name += SchedReads[*RI].Name;
  }
  return Name;
}

std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) {

  std::string Name;
  for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) {
    if (I != InstDefs.begin())
      Name += '_';
    Name += (*I)->getName();
  }
  return Name;
}

/// Add an inferred sched class from an itinerary class and per-operand list of
/// SchedWrites and SchedReads. ProcIndices contains the set of IDs of
/// processors that may utilize this class.
unsigned CodeGenSchedModels::addSchedClass(Record *ItinClassDef,
                                           const IdxVec &OperWrites,
                                           const IdxVec &OperReads,
                                           const IdxVec &ProcIndices)
{
  assert(!ProcIndices.empty() && "expect at least one ProcIdx");

  unsigned Idx = findSchedClassIdx(ItinClassDef, OperWrites, OperReads);
  if (Idx || SchedClasses[0].isKeyEqual(ItinClassDef, OperWrites, OperReads)) {
    IdxVec PI;
    std::set_union(SchedClasses[Idx].ProcIndices.begin(),
                   SchedClasses[Idx].ProcIndices.end(),
                   ProcIndices.begin(), ProcIndices.end(),
                   std::back_inserter(PI));
    SchedClasses[Idx].ProcIndices.swap(PI);
    return Idx;
  }
  Idx = SchedClasses.size();
  SchedClasses.resize(Idx+1);
  CodeGenSchedClass &SC = SchedClasses.back();
  SC.Index = Idx;
  SC.Name = createSchedClassName(ItinClassDef, OperWrites, OperReads);
  SC.ItinClassDef = ItinClassDef;
  SC.Writes = OperWrites;
  SC.Reads = OperReads;
  SC.ProcIndices = ProcIndices;

  return Idx;
}

// Create classes for each set of opcodes that are in the same InstReadWrite
// definition across all processors.
void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) {
  // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that
  // intersects with an existing class via a previous InstRWDef. Instrs that do
  // not intersect with an existing class refer back to their former class as
  // determined from ItinDef or SchedRW.
  SmallVector<std::pair<unsigned, SmallVector<Record *, 8> >, 4> ClassInstrs;
  // Sort Instrs into sets.
  const RecVec *InstDefs = Sets.expand(InstRWDef);
  if (InstDefs->empty())
    PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes");

  for (RecIter I = InstDefs->begin(), E = InstDefs->end(); I != E; ++I) {
    InstClassMapTy::const_iterator Pos = InstrClassMap.find(*I);
    if (Pos == InstrClassMap.end())
      PrintFatalError((*I)->getLoc(), "No sched class for instruction.");
    unsigned SCIdx = Pos->second;
    unsigned CIdx = 0, CEnd = ClassInstrs.size();
    for (; CIdx != CEnd; ++CIdx) {
      if (ClassInstrs[CIdx].first == SCIdx)
        break;
    }
    if (CIdx == CEnd) {
      ClassInstrs.resize(CEnd + 1);
      ClassInstrs[CIdx].first = SCIdx;
    }
    ClassInstrs[CIdx].second.push_back(*I);
  }
  // For each set of Instrs, create a new class if necessary, and map or remap
  // the Instrs to it.
  unsigned CIdx = 0, CEnd = ClassInstrs.size();
  for (; CIdx != CEnd; ++CIdx) {
    unsigned OldSCIdx = ClassInstrs[CIdx].first;
    ArrayRef<Record*> InstDefs = ClassInstrs[CIdx].second;
    // If all of the instrs in the current class are accounted for, then leave
    // them mapped to their old class.
    if (OldSCIdx) {
      const RecVec &RWDefs = SchedClasses[OldSCIdx].InstRWs;
      if (!RWDefs.empty()) {
        const RecVec *OrigInstDefs = Sets.expand(RWDefs[0]);
        unsigned OrigNumInstrs = 0;
        for (RecIter I = OrigInstDefs->begin(), E = OrigInstDefs->end();
             I != E; ++I) {
          if (InstrClassMap[*I] == OldSCIdx)
            ++OrigNumInstrs;
        }
        if (OrigNumInstrs == InstDefs.size()) {
          assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 &&
                 "expected a generic SchedClass");
          DEBUG(dbgs() << "InstRW: Reuse SC " << OldSCIdx << ":"
                << SchedClasses[OldSCIdx].Name << " on "
                << InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");
          SchedClasses[OldSCIdx].InstRWs.push_back(InstRWDef);
          continue;
        }
      }
    }
    unsigned SCIdx = SchedClasses.size();
    SchedClasses.resize(SCIdx+1);
    CodeGenSchedClass &SC = SchedClasses.back();
    SC.Index = SCIdx;
    SC.Name = createSchedClassName(InstDefs);
    DEBUG(dbgs() << "InstRW: New SC " << SCIdx << ":" << SC.Name << " on "
          << InstRWDef->getValueAsDef("SchedModel")->getName() << "\n");

    // Preserve ItinDef and Writes/Reads for processors without an InstRW entry.
    SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef;
    SC.Writes = SchedClasses[OldSCIdx].Writes;
    SC.Reads = SchedClasses[OldSCIdx].Reads;
    SC.ProcIndices.push_back(0);
    // Map each Instr to this new class.
    // Note that InstDefs may be a smaller list than InstRWDef's "Instrs".
    Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel");
    SmallSet<unsigned, 4> RemappedClassIDs;
    for (ArrayRef<Record*>::const_iterator
           II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) {
      unsigned OldSCIdx = InstrClassMap[*II];
      if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx).second) {
        for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(),
               RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) {
          if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) {
            PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " +
                            (*II)->getName() + " also matches " +
                            (*RI)->getValue("Instrs")->getValue()->getAsString());
          }
          assert(*RI != InstRWDef && "SchedClass has duplicate InstRW def");
          SC.InstRWs.push_back(*RI);
        }
      }
      InstrClassMap[*II] = SCIdx;
    }
    SC.InstRWs.push_back(InstRWDef);
  }
}

// True if collectProcItins found anything.
bool CodeGenSchedModels::hasItineraries() const {
  for (CodeGenSchedModels::ProcIter PI = procModelBegin(), PE = procModelEnd();
       PI != PE; ++PI) {
    if (PI->hasItineraries())
      return true;
  }
  return false;
}

// Gather the processor itineraries.
void CodeGenSchedModels::collectProcItins() {
  for (CodeGenProcModel &ProcModel : ProcModels) {
    if (!ProcModel.hasItineraries())
      continue;

    RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID");
    assert(!ItinRecords.empty() && "ProcModel.hasItineraries is incorrect");

    // Populate ItinDefList with Itinerary records.
    ProcModel.ItinDefList.resize(NumInstrSchedClasses);

    // Insert each itinerary data record in the correct position within
    // the processor model's ItinDefList.
    for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) {
      Record *ItinData = ItinRecords[i];
      Record *ItinDef = ItinData->getValueAsDef("TheClass");
      bool FoundClass = false;
      for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
           SCI != SCE; ++SCI) {
        // Multiple SchedClasses may share an itinerary. Update all of them.
        if (SCI->ItinClassDef == ItinDef) {
          ProcModel.ItinDefList[SCI->Index] = ItinData;
          FoundClass = true;
        }
      }
      if (!FoundClass) {
        DEBUG(dbgs() << ProcModel.ItinsDef->getName()
              << " missing class for itinerary " << ItinDef->getName() << '\n');
      }
    }
    // Check for missing itinerary entries.
    assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec");
    DEBUG(
      for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) {
        if (!ProcModel.ItinDefList[i])
          dbgs() << ProcModel.ItinsDef->getName()
                 << " missing itinerary for class "
                 << SchedClasses[i].Name << '\n';
      });
  }
}

// Gather the read/write types for each itinerary class.
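//
// For illustration only (the class and write names are hypothetical), an
// ItinRW record maps legacy itinerary classes to SchedReadWrite types inside
// a processor model, e.g.:
//   def : ItinRW<[WriteALU, ReadALU], [IIC_iALUr, IIC_iALUi]>;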
void CodeGenSchedModels::collectProcItinRW() {
  RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW");
  std::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord());
  for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) {
    if (!(*II)->getValueInit("SchedModel")->isComplete())
      PrintFatalError((*II)->getLoc(), "SchedModel is undefined");
    Record *ModelDef = (*II)->getValueAsDef("SchedModel");
    ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef);
    if (I == ProcModelMap.end()) {
      PrintFatalError((*II)->getLoc(), "Undefined SchedMachineModel "
                      + ModelDef->getName());
    }
    ProcModels[I->second].ItinRWDefs.push_back(*II);
  }
}

/// Infer new classes from existing classes. In the process, this may create new
/// SchedWrites from sequences of existing SchedWrites.
void CodeGenSchedModels::inferSchedClasses() {
  DEBUG(dbgs() << NumInstrSchedClasses << " instr sched classes.\n");

  // Visit all existing classes and newly created classes.
  for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) {
    assert(SchedClasses[Idx].Index == Idx && "bad SCIdx");

    if (SchedClasses[Idx].ItinClassDef)
      inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx);
    if (!SchedClasses[Idx].InstRWs.empty())
      inferFromInstRWs(Idx);
    if (!SchedClasses[Idx].Writes.empty()) {
      inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads,
                  Idx, SchedClasses[Idx].ProcIndices);
    }
    assert(SchedClasses.size() < (NumInstrSchedClasses*6) &&
           "too many SchedVariants");
  }
}

/// Infer classes from per-processor itinerary resources.
void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef,
                                            unsigned FromClassIdx) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
    bool HasMatch = false;
    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
         II != IE; ++II) {
      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      if (HasMatch)
        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      IdxVec ProcIndices(1, PIdx);
      inferFromRW(Writes, Reads, FromClassIdx, ProcIndices);
    }
  }
}

/// Infer classes from per-processor InstReadWrite definitions.
void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) {
  for (unsigned I = 0, E = SchedClasses[SCIdx].InstRWs.size(); I != E; ++I) {
    assert(SchedClasses[SCIdx].InstRWs.size() == E && "InstRWs was mutated!");
    Record *Rec = SchedClasses[SCIdx].InstRWs[I];
    const RecVec *InstDefs = Sets.expand(Rec);
    RecIter II = InstDefs->begin(), IE = InstDefs->end();
    for (; II != IE; ++II) {
      if (InstrClassMap[*II] == SCIdx)
        break;
    }
    // If this class no longer has any instructions mapped to it, it has become
    // irrelevant.
    if (II == IE)
      continue;
    IdxVec Writes, Reads;
    findRWs(Rec->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
    unsigned PIdx = getProcModel(Rec->getValueAsDef("SchedModel")).Index;
    IdxVec ProcIndices(1, PIdx);
    inferFromRW(Writes, Reads, SCIdx, ProcIndices); // May mutate SchedClasses.
  }
}

namespace {
// Helper for substituteVariantOperand.
struct TransVariant {
  Record *VarOrSeqDef;  // Variant or sequence.
  unsigned RWIdx;       // Index of this variant or sequence's matched type.
  unsigned ProcIdx;     // Processor model index or zero for any.
  unsigned TransVecIdx; // Index into PredTransitions::TransVec.

  TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti):
    VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {}
};

// Associate a predicate with the SchedReadWrite that it guards.
// RWIdx is the index of the read/write variant.
struct PredCheck {
  bool IsRead;
  unsigned RWIdx;
  Record *Predicate;

  PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {}
};

// A Predicate transition is a list of RW sequences guarded by a PredTerm.
struct PredTransition {
  // A predicate term is a conjunction of PredChecks.
  SmallVector<PredCheck, 4> PredTerm;
  SmallVector<SmallVector<unsigned,4>, 16> WriteSequences;
  SmallVector<SmallVector<unsigned,4>, 16> ReadSequences;
  SmallVector<unsigned, 4> ProcIndices;
};

// Encapsulate a set of partially constructed transitions.
// The results are built by repeated calls to substituteVariants.
class PredTransitions {
  CodeGenSchedModels &SchedModels;

public:
  std::vector<PredTransition> TransVec;

  PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {}

  void substituteVariantOperand(const SmallVectorImpl<unsigned> &RWSeq,
                                bool IsRead, unsigned StartIdx);

  void substituteVariants(const PredTransition &Trans);

#ifndef NDEBUG
  void dump() const;
#endif

private:
  bool mutuallyExclusive(Record *PredDef, ArrayRef<PredCheck> Term);
  void getIntersectingVariants(
    const CodeGenSchedRW &SchedRW, unsigned TransIdx,
    std::vector<TransVariant> &IntersectingVariants);
  void pushVariant(const TransVariant &VInfo, bool IsRead);
};
} // end anonymous namespace

// Return true if this predicate is mutually exclusive with a PredTerm. This
// degenerates into checking if the predicate is mutually exclusive with any
// predicate in the Term's conjunction.
//
// All predicates associated with a given SchedRW are considered mutually
// exclusive. This should work even if the conditions expressed by the
// predicates are not exclusive because the predicates for a given SchedWrite
// are always checked in the order they are defined in the .td file. Later
// conditions implicitly negate any prior condition.
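//
// For example (names are hypothetical), a variant write such as
//   def WriteLdVar : SchedWriteVariant<[
//     SchedVar<IsLoadPredicate, [WriteLdFast]>,
//     SchedVar<NoSchedPred,     [WriteLdSlow]>]>;
// treats IsLoadPredicate and NoSchedPred as mutually exclusive, because only
// the first matching SchedVar is ever selected at runtime.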
bool PredTransitions::mutuallyExclusive(Record *PredDef,
                                        ArrayRef<PredCheck> Term) {

  for (ArrayRef<PredCheck>::iterator I = Term.begin(), E = Term.end();
       I != E; ++I) {
    if (I->Predicate == PredDef)
      return false;

    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(I->RWIdx, I->IsRead);
    assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant");
    RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    for (RecIter VI = Variants.begin(), VE = Variants.end(); VI != VE; ++VI) {
      if ((*VI)->getValueAsDef("Predicate") == PredDef)
        return true;
    }
  }
  return false;
}

static bool hasAliasedVariants(const CodeGenSchedRW &RW,
                               CodeGenSchedModels &SchedModels) {
  if (RW.HasVariants)
    return true;

  for (RecIter I = RW.Aliases.begin(), E = RW.Aliases.end(); I != E; ++I) {
    const CodeGenSchedRW &AliasRW =
      SchedModels.getSchedRW((*I)->getValueAsDef("AliasRW"));
    if (AliasRW.HasVariants)
      return true;
    if (AliasRW.IsSequence) {
      IdxVec ExpandedRWs;
      SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead);
      for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
           SI != SE; ++SI) {
        if (hasAliasedVariants(SchedModels.getSchedRW(*SI, AliasRW.IsRead),
                               SchedModels)) {
          return true;
        }
      }
    }
  }
  return false;
}

static bool hasVariant(ArrayRef<PredTransition> Transitions,
                       CodeGenSchedModels &SchedModels) {
  for (ArrayRef<PredTransition>::iterator
         PTI = Transitions.begin(), PTE = Transitions.end();
       PTI != PTE; ++PTI) {
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           WSI = PTI->WriteSequences.begin(), WSE = PTI->WriteSequences.end();
         WSI != WSE; ++WSI) {
      for (SmallVectorImpl<unsigned>::const_iterator
             WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
        if (hasAliasedVariants(SchedModels.getSchedWrite(*WI), SchedModels))
          return true;
      }
    }
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           RSI = PTI->ReadSequences.begin(), RSE = PTI->ReadSequences.end();
         RSI != RSE; ++RSI) {
      for (SmallVectorImpl<unsigned>::const_iterator
             RI = RSI->begin(), RE = RSI->end(); RI != RE; ++RI) {
        if (hasAliasedVariants(SchedModels.getSchedRead(*RI), SchedModels))
          return true;
      }
    }
  }
  return false;
}

// Populate IntersectingVariants with any variants or aliased sequences of the
// given SchedRW whose processor indices and predicates are not mutually
// exclusive with the given transition.
void PredTransitions::getIntersectingVariants(
  const CodeGenSchedRW &SchedRW, unsigned TransIdx,
  std::vector<TransVariant> &IntersectingVariants) {

  bool GenericRW = false;

  std::vector<TransVariant> Variants;
  if (SchedRW.HasVariants) {
    unsigned VarProcIdx = 0;
    if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel");
      VarProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    // Push each variant. Assign TransVecIdx later.
    const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants");
    for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
      Variants.push_back(TransVariant(*RI, SchedRW.Index, VarProcIdx, 0));
    if (VarProcIdx == 0)
      GenericRW = true;
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    // If either the SchedAlias itself or the SchedReadWrite that it aliases
    // to is defined within a processor model, constrain all variants to
    // that processor.
    unsigned AliasProcIdx = 0;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*AI)->getValueAsDef("SchedModel");
      AliasProcIdx = SchedModels.getProcModel(ModelDef).Index;
    }
    const CodeGenSchedRW &AliasRW =
      SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW"));

    if (AliasRW.HasVariants) {
      const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants");
      for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI)
        Variants.push_back(TransVariant(*RI, AliasRW.Index, AliasProcIdx, 0));
    }
    if (AliasRW.IsSequence) {
      Variants.push_back(
        TransVariant(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0));
    }
    if (AliasProcIdx == 0)
      GenericRW = true;
  }
  for (unsigned VIdx = 0, VEnd = Variants.size(); VIdx != VEnd; ++VIdx) {
    TransVariant &Variant = Variants[VIdx];
    // Don't expand variants if the processor models don't intersect.
    // A zero processor index means any processor.
    SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
    if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
      unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
                                Variant.ProcIdx);
      if (!Cnt)
        continue;
      if (Cnt > 1) {
        const CodeGenProcModel &PM =
          *(SchedModels.procModelBegin() + Variant.ProcIdx);
        PrintFatalError(Variant.VarOrSeqDef->getLoc(),
                        "Multiple variants defined for processor " +
                        PM.ModelName +
                        ". Ensure only one SchedAlias exists per RW.");
      }
    }
    if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) {
      Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate");
      if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm))
        continue;
    }
    if (IntersectingVariants.empty()) {
      // The first variant builds on the existing transition.
      Variant.TransVecIdx = TransIdx;
      IntersectingVariants.push_back(Variant);
    }
    else {
      // Push another copy of the current transition for more variants.
      Variant.TransVecIdx = TransVec.size();
      IntersectingVariants.push_back(Variant);
      TransVec.push_back(TransVec[TransIdx]);
    }
  }
  if (GenericRW && IntersectingVariants.empty()) {
    PrintFatalError(SchedRW.TheDef->getLoc(), "No variant of this type has "
                    "a matching predicate on any processor");
  }
}

// Push the Reads/Writes selected by this variant onto the PredTransition
// specified by VInfo.
void PredTransitions::
pushVariant(const TransVariant &VInfo, bool IsRead) {

  PredTransition &Trans = TransVec[VInfo.TransVecIdx];

  // If this operand transition is reached through a processor-specific alias,
  // then the whole transition is specific to this processor.
  if (VInfo.ProcIdx != 0)
    Trans.ProcIndices.assign(1, VInfo.ProcIdx);

  IdxVec SelectedRWs;
  if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) {
    Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate");
    Trans.PredTerm.push_back(PredCheck(IsRead, VInfo.RWIdx, PredDef));
    RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected");
    SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead);
  }
  else {
    assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") &&
           "variant must be a SchedVariant or aliased WriteSequence");
    SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead));
  }

  const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead);

  SmallVectorImpl<SmallVector<unsigned,4> > &RWSequences = IsRead
    ? Trans.ReadSequences : Trans.WriteSequences;
  if (SchedRW.IsVariadic) {
    unsigned OperIdx = RWSequences.size()-1;
    // Make N-1 copies of this transition's last sequence.
    for (unsigned i = 1, e = SelectedRWs.size(); i != e; ++i) {
      // Reserve space first: push_back of an element of the same vector could
      // otherwise reallocate and invalidate the copied-from element.
      RWSequences.reserve(RWSequences.size() + 1);
      RWSequences.push_back(RWSequences[OperIdx]);
    }
    // Push each of the N elements of the SelectedRWs onto a copy of the last
    // sequence (split the current operand into N operands).
    // Note that write sequences should be expanded within this loop--the
    // entire sequence belongs to a single operand.
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI, ++OperIdx) {
      IdxVec ExpandedRWs;
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
      RWSequences[OperIdx].insert(RWSequences[OperIdx].end(),
                                  ExpandedRWs.begin(), ExpandedRWs.end());
    }
    assert(OperIdx == RWSequences.size() && "missed a sequence");
  }
  else {
    // Push this transition's expanded sequence onto this transition's last
    // sequence (add to the current operand's sequence).
    SmallVectorImpl<unsigned> &Seq = RWSequences.back();
    IdxVec ExpandedRWs;
    for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end();
         RWI != RWE; ++RWI) {
      if (IsRead)
        ExpandedRWs.push_back(*RWI);
      else
        SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead);
    }
    Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end());
  }
}

// RWSeq is a sequence of all Reads or all Writes for the next read or write
// operand. StartIdx is an index into TransVec where partial results start.
// RWSeq must be applied to all transitions between StartIdx and the end of
// TransVec.
void PredTransitions::substituteVariantOperand(
  const SmallVectorImpl<unsigned> &RWSeq, bool IsRead, unsigned StartIdx) {

  // Visit each original RW within the current sequence.
  for (SmallVectorImpl<unsigned>::const_iterator
         RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) {
    const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead);
    // Push this RW on all partial PredTransitions or distribute variants.
    // New PredTransitions may be pushed within this loop which should not be
    // revisited (TransEnd must be loop invariant).
    for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size();
         TransIdx != TransEnd; ++TransIdx) {
      // In the common case, push RW onto the current operand's sequence.
      if (!hasAliasedVariants(SchedRW, SchedModels)) {
        if (IsRead)
          TransVec[TransIdx].ReadSequences.back().push_back(*RWI);
        else
          TransVec[TransIdx].WriteSequences.back().push_back(*RWI);
        continue;
      }
      // Distribute this partial PredTransition across intersecting variants.
      // This will push copies of TransVec[TransIdx] on the back of TransVec.
      std::vector<TransVariant> IntersectingVariants;
      getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants);
      // Now expand each variant on top of its copy of the transition.
      for (std::vector<TransVariant>::const_iterator
             IVI = IntersectingVariants.begin(),
             IVE = IntersectingVariants.end();
           IVI != IVE; ++IVI) {
        pushVariant(*IVI, IsRead);
      }
    }
  }
}

// For each variant of a Read/Write in Trans, substitute the sequence of
// Read/Writes guarded by the variant. This is exponential in the number of
// variant Read/Writes, but in practice detection of mutually exclusive
// predicates should result in linear growth in the total number of variants.
//
// This is one step in a breadth-first search of nested variants.
void PredTransitions::substituteVariants(const PredTransition &Trans) {
  // Build up a set of partial results starting at the back of
  // PredTransitions. Remember the first new transition.
  unsigned StartIdx = TransVec.size();
  TransVec.resize(TransVec.size() + 1);
  TransVec.back().PredTerm = Trans.PredTerm;
  TransVec.back().ProcIndices = Trans.ProcIndices;

  // Visit each original write sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
         WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end();
       WSI != WSE; ++WSI) {
    // Push a new (empty) write sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->WriteSequences.resize(I->WriteSequences.size() + 1);
    }
    substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx);
  }
  // Visit each original read sequence.
  for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
         RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end();
       RSI != RSE; ++RSI) {
    // Push a new (empty) read sequence onto all partial Transitions.
    for (std::vector<PredTransition>::iterator I =
           TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) {
      I->ReadSequences.resize(I->ReadSequences.size() + 1);
    }
    substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx);
  }
}

// Create a new SchedClass for each variant found by inferFromRW.
static void inferFromTransitions(ArrayRef<PredTransition> LastTransitions,
                                 unsigned FromClassIdx,
                                 CodeGenSchedModels &SchedModels) {
  // For each PredTransition, create a new CodeGenSchedTransition, which usually
  // requires creating a new SchedClass.
  for (ArrayRef<PredTransition>::iterator
         I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) {
    IdxVec OperWritesVariant;
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           WSI = I->WriteSequences.begin(), WSE = I->WriteSequences.end();
         WSI != WSE; ++WSI) {
      // Create a new write representing the expanded sequence.
      OperWritesVariant.push_back(
        SchedModels.findOrInsertRW(*WSI, /*IsRead=*/false));
    }
    IdxVec OperReadsVariant;
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           RSI = I->ReadSequences.begin(), RSE = I->ReadSequences.end();
         RSI != RSE; ++RSI) {
      // Create a new read representing the expanded sequence.
      OperReadsVariant.push_back(
        SchedModels.findOrInsertRW(*RSI, /*IsRead=*/true));
    }
    IdxVec ProcIndices(I->ProcIndices.begin(), I->ProcIndices.end());
    CodeGenSchedTransition SCTrans;
    SCTrans.ToClassIdx =
      SchedModels.addSchedClass(/*ItinClassDef=*/nullptr, OperWritesVariant,
                                OperReadsVariant, ProcIndices);
    SCTrans.ProcIndices = ProcIndices;
    // The final PredTerm is the unique set of predicates guarding the
    // transition.
    RecVec Preds;
    for (SmallVectorImpl<PredCheck>::const_iterator
           PI = I->PredTerm.begin(), PE = I->PredTerm.end(); PI != PE; ++PI) {
      Preds.push_back(PI->Predicate);
    }
    RecIter PredsEnd = std::unique(Preds.begin(), Preds.end());
    Preds.resize(PredsEnd - Preds.begin());
    SCTrans.PredTerm = Preds;
    SchedModels.getSchedClass(FromClassIdx).Transitions.push_back(SCTrans);
  }
}

// Create new SchedClasses for the given ReadWrite list. If any of the
// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant
// of the ReadWrite list, following Aliases if necessary.
void CodeGenSchedModels::inferFromRW(const IdxVec &OperWrites,
                                     const IdxVec &OperReads,
                                     unsigned FromClassIdx,
                                     const IdxVec &ProcIndices) {
  DEBUG(dbgs() << "INFER RW proc("; dumpIdxVec(ProcIndices); dbgs() << ") ");

  // Create a seed transition with an empty PredTerm and the expanded sequences
  // of SchedWrites for the current SchedClass.
  std::vector<PredTransition> LastTransitions;
  LastTransitions.resize(1);
  LastTransitions.back().ProcIndices.append(ProcIndices.begin(),
                                            ProcIndices.end());

  for (IdxIter I = OperWrites.begin(), E = OperWrites.end(); I != E; ++I) {
    IdxVec WriteSeq;
    expandRWSequence(*I, WriteSeq, /*IsRead=*/false);
    unsigned Idx = LastTransitions[0].WriteSequences.size();
    LastTransitions[0].WriteSequences.resize(Idx + 1);
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].WriteSequences[Idx];
    for (IdxIter WI = WriteSeq.begin(), WE = WriteSeq.end(); WI != WE; ++WI)
      Seq.push_back(*WI);
    DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  DEBUG(dbgs() << " Reads: ");
  for (IdxIter I = OperReads.begin(), E = OperReads.end(); I != E; ++I) {
    IdxVec ReadSeq;
    expandRWSequence(*I, ReadSeq, /*IsRead=*/true);
    unsigned Idx = LastTransitions[0].ReadSequences.size();
    LastTransitions[0].ReadSequences.resize(Idx + 1);
    SmallVectorImpl<unsigned> &Seq = LastTransitions[0].ReadSequences[Idx];
    for (IdxIter RI = ReadSeq.begin(), RE = ReadSeq.end(); RI != RE; ++RI)
      Seq.push_back(*RI);
    DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") ");
  }
  DEBUG(dbgs() << '\n');

  // Collect all PredTransitions for individual operands.
  // Iterate until no variant writes remain.
  while (hasVariant(LastTransitions, *this)) {
    PredTransitions Transitions(*this);
    for (std::vector<PredTransition>::const_iterator
           I = LastTransitions.begin(), E = LastTransitions.end();
         I != E; ++I) {
      Transitions.substituteVariants(*I);
    }
    DEBUG(Transitions.dump());
    LastTransitions.swap(Transitions.TransVec);
  }
  // If the first transition has no variants, nothing to do.
  if (LastTransitions[0].PredTerm.empty())
    return;

  // WARNING: We are about to mutate the SchedClasses vector. Do not refer to
  // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions.
  inferFromTransitions(LastTransitions, FromClassIdx, *this);
}

// Check if any processor resource group contains all resource records in
// SubUnits.
bool CodeGenSchedModels::hasSuperGroup(RecVec &SubUnits, CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec SuperUnits =
      PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    RecIter RI = SubUnits.begin(), RE = SubUnits.end();
    for ( ; RI != RE; ++RI) {
      if (std::find(SuperUnits.begin(), SuperUnits.end(), *RI)
          == SuperUnits.end()) {
        break;
      }
    }
    if (RI == RE)
      return true;
  }
  return false;
}

// Verify that overlapping groups have a common supergroup.
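//
// For example (the unit names are hypothetical), if a processor defines the
// overlapping groups
//   def P01 : ProcResGroup<[P0, P1]>;
//   def P12 : ProcResGroup<[P1, P2]>;
// then some group covering both, such as ProcResGroup<[P0, P1, P2]>, must
// also exist, or verifyProcResourceGroups reports a fatal error.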
void CodeGenSchedModels::verifyProcResourceGroups(CodeGenProcModel &PM) {
  for (unsigned i = 0, e = PM.ProcResourceDefs.size(); i < e; ++i) {
    if (!PM.ProcResourceDefs[i]->isSubClassOf("ProcResGroup"))
      continue;
    RecVec CheckUnits =
      PM.ProcResourceDefs[i]->getValueAsListOfDefs("Resources");
    for (unsigned j = i+1; j < e; ++j) {
      if (!PM.ProcResourceDefs[j]->isSubClassOf("ProcResGroup"))
        continue;
      RecVec OtherUnits =
        PM.ProcResourceDefs[j]->getValueAsListOfDefs("Resources");
      if (std::find_first_of(CheckUnits.begin(), CheckUnits.end(),
                             OtherUnits.begin(), OtherUnits.end())
          != CheckUnits.end()) {
        // CheckUnits and OtherUnits overlap.
        OtherUnits.insert(OtherUnits.end(), CheckUnits.begin(),
                          CheckUnits.end());
        if (!hasSuperGroup(OtherUnits, PM)) {
          PrintFatalError((PM.ProcResourceDefs[i])->getLoc(),
                          "proc resource group overlaps with "
                          + PM.ProcResourceDefs[j]->getName()
                          + " but no supergroup contains both.");
        }
      }
    }
  }
}

// Collect and sort WriteRes, ReadAdvance, and ProcResources.
void CodeGenSchedModels::collectProcResources() {
  // Add any subtarget-specific SchedReadWrites that are directly associated
  // with processor resources. Refer to the parent SchedClass's ProcIndices to
  // determine which processors they apply to.
  for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd();
       SCI != SCE; ++SCI) {
    if (SCI->ItinClassDef)
      collectItinProcResources(SCI->ItinClassDef);
    else {
      // This class may have a default ReadWrite list which can be overridden
      // by InstRW definitions.
      if (!SCI->InstRWs.empty()) {
        for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end();
             RWI != RWE; ++RWI) {
          Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel");
          IdxVec ProcIndices(1, getProcModel(RWModelDef).Index);
          IdxVec Writes, Reads;
          findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"),
                  Writes, Reads);
          collectRWResources(Writes, Reads, ProcIndices);
        }
      }
      collectRWResources(SCI->Writes, SCI->Reads, SCI->ProcIndices);
    }
  }
  // Add resources separately defined by each subtarget.
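  // For illustration only (the write, read, and unit names are hypothetical),
  // per-processor resource records typically look like:
  //   def : WriteRes<WriteIMul, [ALUPort]> { let Latency = 3; }
  //   def : ReadAdvance<ReadMulAcc, 2>;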
  RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes");
  for (RecIter WRI = WRDefs.begin(), WRE = WRDefs.end(); WRI != WRE; ++WRI) {
    Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
    addWriteRes(*WRI, getProcModel(ModelDef).Index);
  }
  RecVec SWRDefs = Records.getAllDerivedDefinitions("SchedWriteRes");
  for (RecIter WRI = SWRDefs.begin(), WRE = SWRDefs.end(); WRI != WRE; ++WRI) {
    Record *ModelDef = (*WRI)->getValueAsDef("SchedModel");
    addWriteRes(*WRI, getProcModel(ModelDef).Index);
  }
  RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance");
  for (RecIter RAI = RADefs.begin(), RAE = RADefs.end(); RAI != RAE; ++RAI) {
    Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
    addReadAdvance(*RAI, getProcModel(ModelDef).Index);
  }
  RecVec SRADefs = Records.getAllDerivedDefinitions("SchedReadAdvance");
  for (RecIter RAI = SRADefs.begin(), RAE = SRADefs.end(); RAI != RAE; ++RAI) {
    if ((*RAI)->getValueInit("SchedModel")->isComplete()) {
      Record *ModelDef = (*RAI)->getValueAsDef("SchedModel");
      addReadAdvance(*RAI, getProcModel(ModelDef).Index);
    }
  }
  // Add ProcResGroups that are defined within this processor model, which may
  // not be directly referenced but may directly specify a buffer size.
  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
  for (RecIter RI = ProcResGroups.begin(), RE = ProcResGroups.end();
       RI != RE; ++RI) {
    if (!(*RI)->getValueInit("SchedModel")->isComplete())
      continue;
    CodeGenProcModel &PM = getProcModel((*RI)->getValueAsDef("SchedModel"));
    RecIter I = std::find(PM.ProcResourceDefs.begin(),
                          PM.ProcResourceDefs.end(), *RI);
    if (I == PM.ProcResourceDefs.end())
      PM.ProcResourceDefs.push_back(*RI);
  }
  // Finalize each ProcModel by sorting the record arrays.
  for (CodeGenProcModel &PM : ProcModels) {
    std::sort(PM.WriteResDefs.begin(), PM.WriteResDefs.end(),
              LessRecord());
    std::sort(PM.ReadAdvanceDefs.begin(), PM.ReadAdvanceDefs.end(),
              LessRecord());
    std::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(),
              LessRecord());
    DEBUG(
      PM.dump();
      dbgs() << "WriteResDefs: ";
      for (RecIter RI = PM.WriteResDefs.begin(),
             RE = PM.WriteResDefs.end(); RI != RE; ++RI) {
        if ((*RI)->isSubClassOf("WriteRes"))
          dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " ";
        else
          dbgs() << (*RI)->getName() << " ";
      }
      dbgs() << "\nReadAdvanceDefs: ";
      for (RecIter RI = PM.ReadAdvanceDefs.begin(),
             RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) {
        if ((*RI)->isSubClassOf("ReadAdvance"))
          dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " ";
        else
          dbgs() << (*RI)->getName() << " ";
      }
      dbgs() << "\nProcResourceDefs: ";
      for (RecIter RI = PM.ProcResourceDefs.begin(),
             RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) {
        dbgs() << (*RI)->getName() << " ";
      }
      dbgs() << '\n');
    verifyProcResourceGroups(PM);
  }
}

// Collect itinerary class resources for each processor.
void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) {
  for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) {
    const CodeGenProcModel &PM = ProcModels[PIdx];
    // For all ItinRW entries.
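    // Each ItinRW maps a list of itinerary classes to a SchedReadWrite list
    // for one processor model. A given itinerary class may match at most one
    // ItinRW per model; duplicates are reported as fatal errors below.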
    bool HasMatch = false;
    for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end();
         II != IE; ++II) {
      RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses");
      if (!std::count(Matched.begin(), Matched.end(), ItinClassDef))
        continue;
      if (HasMatch)
        PrintFatalError((*II)->getLoc(), "Duplicate itinerary class "
                        + ItinClassDef->getName()
                        + " in ItinResources for " + PM.ModelName);
      HasMatch = true;
      IdxVec Writes, Reads;
      findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads);
      IdxVec ProcIndices(1, PIdx);
      collectRWResources(Writes, Reads, ProcIndices);
    }
  }
}

void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead,
                                            const IdxVec &ProcIndices) {
  const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead);
  if (SchedRW.TheDef) {
    if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) {
      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
           PI != PE; ++PI) {
        addWriteRes(SchedRW.TheDef, *PI);
      }
    }
    else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) {
      for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end();
           PI != PE; ++PI) {
        addReadAdvance(SchedRW.TheDef, *PI);
      }
    }
  }
  for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end();
       AI != AE; ++AI) {
    IdxVec AliasProcIndices;
    if ((*AI)->getValueInit("SchedModel")->isComplete()) {
      AliasProcIndices.push_back(
        getProcModel((*AI)->getValueAsDef("SchedModel")).Index);
    }
    else
      AliasProcIndices = ProcIndices;
    const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW"));
    assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes");

    IdxVec ExpandedRWs;
    expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead);
    for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end();
         SI != SE; ++SI) {
      collectRWResources(*SI, IsRead, AliasProcIndices);
    }
  }
}

// Collect resources for a set of read/write types and processor indices.
void CodeGenSchedModels::collectRWResources(const IdxVec &Writes,
                                            const IdxVec &Reads,
                                            const IdxVec &ProcIndices) {

  for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI)
    collectRWResources(*WI, /*IsRead=*/false, ProcIndices);

  for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI)
    collectRWResources(*RI, /*IsRead=*/true, ProcIndices);
}


// Find the processor's resource units for this kind of resource.
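// The kind may itself be a ProcResourceUnits def, it may be implemented by a
// ProcResourceUnits def via that def's Kind field, or it may be a
// ProcResGroup. Exactly one definition per processor model is expected;
// finding none or more than one is a fatal error.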
Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind,
                                             const CodeGenProcModel &PM) const {
  if (ProcResKind->isSubClassOf("ProcResourceUnits"))
    return ProcResKind;

  Record *ProcUnitDef = nullptr;
  RecVec ProcResourceDefs =
    Records.getAllDerivedDefinitions("ProcResourceUnits");

  for (RecIter RI = ProcResourceDefs.begin(), RE = ProcResourceDefs.end();
       RI != RE; ++RI) {

    if ((*RI)->getValueAsDef("Kind") == ProcResKind
        && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError((*RI)->getLoc(),
                        "Multiple ProcessorResourceUnits associated with "
                        + ProcResKind->getName());
      }
      ProcUnitDef = *RI;
    }
  }
  RecVec ProcResGroups = Records.getAllDerivedDefinitions("ProcResGroup");
  for (RecIter RI = ProcResGroups.begin(), RE = ProcResGroups.end();
       RI != RE; ++RI) {

    if (*RI == ProcResKind
        && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) {
      if (ProcUnitDef) {
        PrintFatalError((*RI)->getLoc(),
                        "Multiple ProcessorResourceUnits associated with "
                        + ProcResKind->getName());
      }
      ProcUnitDef = *RI;
    }
  }
  if (!ProcUnitDef) {
    PrintFatalError(ProcResKind->getLoc(),
                    "No ProcessorResources associated with "
                    + ProcResKind->getName());
  }
  return ProcUnitDef;
}

// Iteratively add a resource and its super resources.
void CodeGenSchedModels::addProcResource(Record *ProcResKind,
                                         CodeGenProcModel &PM) {
  for (;;) {
    Record *ProcResUnits = findProcResUnits(ProcResKind, PM);

    // See if this ProcResource is already associated with this processor.
    RecIter I = std::find(PM.ProcResourceDefs.begin(),
                          PM.ProcResourceDefs.end(), ProcResUnits);
    if (I != PM.ProcResourceDefs.end())
      return;

    PM.ProcResourceDefs.push_back(ProcResUnits);
    if (ProcResUnits->isSubClassOf("ProcResGroup"))
      return;

    if (!ProcResUnits->getValueInit("Super")->isComplete())
      return;

    ProcResKind = ProcResUnits->getValueAsDef("Super");
  }
}

// Add resources for a SchedWrite to this processor if they don't exist.
void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) {
  assert(PIdx && "don't add resources to an invalid Processor model");

  RecVec &WRDefs = ProcModels[PIdx].WriteResDefs;
  RecIter WRI = std::find(WRDefs.begin(), WRDefs.end(), ProcWriteResDef);
  if (WRI != WRDefs.end())
    return;
  WRDefs.push_back(ProcWriteResDef);

  // Visit ProcResourceKinds referenced by the newly discovered WriteRes.
  RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources");
  for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end();
       WritePRI != WritePRE; ++WritePRI) {
    addProcResource(*WritePRI, ProcModels[PIdx]);
  }
}

// Add resources for a ReadAdvance to this processor if they don't exist.
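// Unlike a WriteRes, a ReadAdvance does not reference any ProcResources, so
// recording the def is all that is needed here.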
void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef,
                                        unsigned PIdx) {
  RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs;
  RecIter I = std::find(RADefs.begin(), RADefs.end(), ProcReadAdvanceDef);
  if (I != RADefs.end())
    return;
  RADefs.push_back(ProcReadAdvanceDef);
}

unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const {
  RecIter PRPos = std::find(ProcResourceDefs.begin(), ProcResourceDefs.end(),
                            PRDef);
  if (PRPos == ProcResourceDefs.end())
    PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in "
                    "the ProcResources list for " + ModelName);
  // Idx=0 is reserved for invalid.
  return 1 + (PRPos - ProcResourceDefs.begin());
}

#ifndef NDEBUG
void CodeGenProcModel::dump() const {
  dbgs() << Index << ": " << ModelName << " "
         << (ModelDef ? ModelDef->getName() : "inferred") << " "
         << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n';
}

void CodeGenSchedRW::dump() const {
  dbgs() << Name << (IsVariadic ? " (V) " : " ");
  if (IsSequence) {
    dbgs() << "(";
    dumpIdxVec(Sequence);
    dbgs() << ")";
  }
}

void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const {
  dbgs() << "SCHEDCLASS " << Index << ":" << Name << '\n'
         << " Writes: ";
  for (unsigned i = 0, N = Writes.size(); i < N; ++i) {
    SchedModels->getSchedWrite(Writes[i]).dump();
    if (i < N-1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n Reads: ";
  for (unsigned i = 0, N = Reads.size(); i < N; ++i) {
    SchedModels->getSchedRead(Reads[i]).dump();
    if (i < N-1) {
      dbgs() << '\n';
      dbgs().indent(10);
    }
  }
  dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n';
  if (!Transitions.empty()) {
    dbgs() << "\n Transitions for Proc ";
    for (std::vector<CodeGenSchedTransition>::const_iterator
           TI = Transitions.begin(), TE = Transitions.end(); TI != TE; ++TI) {
      dumpIdxVec(TI->ProcIndices);
    }
  }
}

void PredTransitions::dump() const {
  dbgs() << "Expanded Variants:\n";
  for (std::vector<PredTransition>::const_iterator
         TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) {
    dbgs() << "{";
    for (SmallVectorImpl<PredCheck>::const_iterator
           PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end();
         PCI != PCE; ++PCI) {
      if (PCI != TI->PredTerm.begin())
        dbgs() << ", ";
      dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name
             << ":" << PCI->Predicate->getName();
    }
    dbgs() << "},\n => {";
    for (SmallVectorImpl<SmallVector<unsigned,4> >::const_iterator
           WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end();
         WSI != WSE; ++WSI) {
      dbgs() << "(";
      for (SmallVectorImpl<unsigned>::const_iterator
             WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) {
        if (WI != WSI->begin())
          dbgs() << ", ";
        dbgs() << SchedModels.getSchedWrite(*WI).Name;
      }
      dbgs() << "),";
    }
    dbgs() << "}\n";
  }
}
#endif // NDEBUG