//===- jit-runner.cpp - MLIR CPU Execution Driver Library -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a library that provides a shared implementation for command line
// utilities that execute an MLIR file on the CPU by translating MLIR to LLVM
// IR before JIT-compiling and executing the latter.
//
// The translation can be customized by providing an MLIR to MLIR
// transformation.
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/JitRunner.h"

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/ExecutionEngine/ExecutionEngine.h"
#include "mlir/ExecutionEngine/OptUtils.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser.h"
#include "mlir/Support/FileUtilities.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassNameParser.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/ToolOutputFile.h"
#include <cstdint>
#include <numeric>

using namespace mlir;
using llvm::Error;

namespace {
/// This options struct prevents the need for global static initializers, and
/// is only initialized if the JITRunner is invoked.
struct Options {
  llvm::cl::opt<std::string> inputFilename{llvm::cl::Positional,
                                           llvm::cl::desc("<input file>"),
                                           llvm::cl::init("-")};
  llvm::cl::opt<std::string> mainFuncName{
      "e", llvm::cl::desc("The function to be called"),
      llvm::cl::value_desc("<function name>"), llvm::cl::init("main")};
  llvm::cl::opt<std::string> mainFuncType{
      "entry-point-result",
      llvm::cl::desc("Textual description of the function type to be called"),
      llvm::cl::value_desc("f32 | i32 | i64 | void"), llvm::cl::init("f32")};

  llvm::cl::OptionCategory optFlags{"opt-like flags"};

  // CLI list of pass information
  llvm::cl::list<const llvm::PassInfo *, bool, llvm::PassNameParser> llvmPasses{
      llvm::cl::desc("LLVM optimizing passes to run"), llvm::cl::cat(optFlags)};

  // CLI variables for -On options.
  llvm::cl::opt<bool> optO0{"O0",
                            llvm::cl::desc("Run opt passes and codegen at O0"),
                            llvm::cl::cat(optFlags)};
  llvm::cl::opt<bool> optO1{"O1",
                            llvm::cl::desc("Run opt passes and codegen at O1"),
                            llvm::cl::cat(optFlags)};
  llvm::cl::opt<bool> optO2{"O2",
                            llvm::cl::desc("Run opt passes and codegen at O2"),
                            llvm::cl::cat(optFlags)};
  llvm::cl::opt<bool> optO3{"O3",
                            llvm::cl::desc("Run opt passes and codegen at O3"),
                            llvm::cl::cat(optFlags)};

  llvm::cl::OptionCategory clOptionsCategory{"linking options"};
  llvm::cl::list<std::string> clSharedLibs{
      "shared-libs", llvm::cl::desc("Libraries to link dynamically"),
      llvm::cl::ZeroOrMore, llvm::cl::MiscFlags::CommaSeparated,
      llvm::cl::cat(clOptionsCategory)};
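
  // Note: libraries passed through -shared-libs may optionally export the
  // __mlir_runner_init / __mlir_runner_destroy hooks handled in
  // compileAndExecute() below; libraries that do not are handed directly to
  // the ExecutionEngine.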

  /// CLI variables for debugging.
  llvm::cl::opt<bool> dumpObjectFile{
      "dump-object-file",
      llvm::cl::desc("Dump JIT-compiled object to the file specified with "
                     "-object-filename (<input file>.o by default).")};

  llvm::cl::opt<std::string> objectFilename{
      "object-filename",
      llvm::cl::desc("Name of the file to dump the JIT-compiled object to "
                     "(<input file>.o by default)")};
};

struct CompileAndExecuteConfig {
  /// LLVM module transformer that is passed to ExecutionEngine.
  llvm::function_ref<llvm::Error(llvm::Module *)> transformer;

  /// A custom function that is passed to ExecutionEngine. It processes the
  /// MLIR module and creates an LLVM IR module.
  llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
                                                   llvm::LLVMContext &)>
      llvmModuleBuilder;

  /// A custom function that is passed to ExecutionEngine to register symbols
  /// at runtime.
  llvm::function_ref<llvm::orc::SymbolMap(llvm::orc::MangleAndInterner)>
      runtimeSymbolMap;
};

} // end anonymous namespace

static OwningModuleRef parseMLIRInput(StringRef inputFilename,
                                      MLIRContext *context) {
  // Set up the input file.
  std::string errorMessage;
  auto file = openInputFile(inputFilename, &errorMessage);
  if (!file) {
    llvm::errs() << errorMessage << "\n";
    return nullptr;
  }

  llvm::SourceMgr sourceMgr;
  sourceMgr.AddNewSourceBuffer(std::move(file), llvm::SMLoc());
  return OwningModuleRef(parseSourceFile(sourceMgr, context));
}

static inline Error make_string_error(const Twine &message) {
  return llvm::make_error<llvm::StringError>(message.str(),
                                             llvm::inconvertibleErrorCode());
}

static Optional<unsigned> getCommandLineOptLevel(Options &options) {
  Optional<unsigned> optLevel;
  SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
      options.optO0, options.optO1, options.optO2, options.optO3};

  // Determine if there is an optimization flag present.
  for (unsigned j = 0; j < 4; ++j) {
    auto &flag = optFlags[j].get();
    if (flag) {
      optLevel = j;
      break;
    }
  }
  return optLevel;
}

// JIT-compile the given module and run "entryPoint" with "args" as arguments.
static Error compileAndExecute(Options &options, ModuleOp module,
                               StringRef entryPoint,
                               CompileAndExecuteConfig config, void **args) {
  Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
  if (auto clOptLevel = getCommandLineOptLevel(options))
    jitCodeGenOptLevel =
        static_cast<llvm::CodeGenOpt::Level>(clOptLevel.getValue());

  // If a shared library implements custom mlir-runner init and destroy
  // functions, we'll use them to register the library with the execution
  // engine. Otherwise we'll pass the library directly to the execution engine.
  SmallVector<StringRef, 4> libs(options.clSharedLibs.begin(),
                                 options.clSharedLibs.end());

  // Libraries that we'll pass to the ExecutionEngine for loading.
  SmallVector<StringRef, 4> executionEngineLibs;

  using MlirRunnerInitFn = void (*)(llvm::StringMap<void *> &);
  using MlirRunnerDestroyFn = void (*)();

  llvm::StringMap<void *> exportSymbols;
  SmallVector<MlirRunnerDestroyFn> destroyFns;
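
  // For illustration only (not part of this library): a shared library opting
  // into this protocol defines hooks matching the MlirRunnerInitFn and
  // MlirRunnerDestroyFn signatures above. The registered symbol name and the
  // printMessage function in this sketch are hypothetical:
  //
  //   extern "C" void
  //   __mlir_runner_init(llvm::StringMap<void *> &exportSymbols) {
  //     exportSymbols["print_message"] =
  //         reinterpret_cast<void *>(&printMessage);
  //   }
  //   extern "C" void __mlir_runner_destroy() {}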

  // Handle libraries that do support mlir-runner init/destroy callbacks.
  for (auto libPath : libs) {
    auto lib = llvm::sys::DynamicLibrary::getPermanentLibrary(libPath.data());
    void *initSym = lib.getAddressOfSymbol("__mlir_runner_init");
    void *destroySym = lib.getAddressOfSymbol("__mlir_runner_destroy");

    // Library does not support the mlir-runner protocol; load it with the
    // ExecutionEngine.
    if (!initSym || !destroySym) {
      executionEngineLibs.push_back(libPath);
      continue;
    }

    auto initFn = reinterpret_cast<MlirRunnerInitFn>(initSym);
    initFn(exportSymbols);

    auto destroyFn = reinterpret_cast<MlirRunnerDestroyFn>(destroySym);
    destroyFns.push_back(destroyFn);
  }

  // Build a runtime symbol map from the config and exported symbols.
  auto runtimeSymbolMap = [&](llvm::orc::MangleAndInterner interner) {
    auto symbolMap = config.runtimeSymbolMap ? config.runtimeSymbolMap(interner)
                                             : llvm::orc::SymbolMap();
    for (auto &exportSymbol : exportSymbols)
      symbolMap[interner(exportSymbol.getKey())] =
          llvm::JITEvaluatedSymbol::fromPointer(exportSymbol.getValue());
    return symbolMap;
  };

  auto expectedEngine = mlir::ExecutionEngine::create(
      module, config.llvmModuleBuilder, config.transformer, jitCodeGenOptLevel,
      executionEngineLibs);
  if (!expectedEngine)
    return expectedEngine.takeError();

  auto engine = std::move(*expectedEngine);
  engine->registerSymbols(runtimeSymbolMap);

  auto expectedFPtr = engine->lookup(entryPoint);
  if (!expectedFPtr)
    return expectedFPtr.takeError();

  if (options.dumpObjectFile)
    engine->dumpToObjectFile(options.objectFilename.empty()
                                 ? options.inputFilename + ".o"
                                 : options.objectFilename);
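
  // The function pointer returned by lookup() takes a single array of
  // pointers, one per argument or result slot; the typed helpers below use it
  // to pass the address of a result variable into the JIT-compiled entry
  // point.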
  void (*fptr)(void **) = *expectedFPtr;
  (*fptr)(args);

  // Run all dynamic library destroy callbacks to prepare for the shutdown.
  llvm::for_each(destroyFns, [](MlirRunnerDestroyFn destroy) { destroy(); });

  return Error::success();
}

static Error compileAndExecuteVoidFunction(Options &options, ModuleOp module,
                                           StringRef entryPoint,
                                           CompileAndExecuteConfig config) {
  auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
  if (!mainFunction || mainFunction.empty())
    return make_string_error("entry point not found");
  void *empty = nullptr;
  return compileAndExecute(options, module, entryPoint, config, &empty);
}

template <typename Type>
Error checkCompatibleReturnType(LLVM::LLVMFuncOp mainFunction);
template <>
Error checkCompatibleReturnType<int32_t>(LLVM::LLVMFuncOp mainFunction) {
  auto resultType = mainFunction.getType()
                        .cast<LLVM::LLVMFunctionType>()
                        .getReturnType()
                        .dyn_cast<IntegerType>();
  if (!resultType || resultType.getWidth() != 32)
    return make_string_error("only single i32 function result supported");
  return Error::success();
}
template <>
Error checkCompatibleReturnType<int64_t>(LLVM::LLVMFuncOp mainFunction) {
  auto resultType = mainFunction.getType()
                        .cast<LLVM::LLVMFunctionType>()
                        .getReturnType()
                        .dyn_cast<IntegerType>();
  if (!resultType || resultType.getWidth() != 64)
    return make_string_error("only single i64 function result supported");
  return Error::success();
}
template <>
Error checkCompatibleReturnType<float>(LLVM::LLVMFuncOp mainFunction) {
  if (!mainFunction.getType()
           .cast<LLVM::LLVMFunctionType>()
           .getReturnType()
           .isa<Float32Type>())
    return make_string_error("only single f32 function result supported");
  return Error::success();
}
template <typename Type>
Error compileAndExecuteSingleReturnFunction(Options &options, ModuleOp module,
                                            StringRef entryPoint,
                                            CompileAndExecuteConfig config) {
  auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
  if (!mainFunction || mainFunction.isExternal())
    return make_string_error("entry point not found");

  if (mainFunction.getType().cast<LLVM::LLVMFunctionType>().getNumParams() != 0)
    return make_string_error("function inputs not supported");

  if (Error error = checkCompatibleReturnType<Type>(mainFunction))
    return error;

  Type res;
  struct {
    void *data;
  } data;
  data.data = &res;
  if (auto error = compileAndExecute(options, module, entryPoint, config,
                                     (void **)&data))
    return error;

  // Intentional printing of the output so we can test.
  llvm::outs() << res << '\n';

  return Error::success();
}

/// Entry point for all CPU runners. Expects the common argc/argv arguments for
/// standard C++ main functions.
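///
/// A minimal caller looks roughly like the sketch below. This is illustration
/// only: headers are omitted, and the exact target, pass, and dialect or
/// translation registration a real tool needs depends on its input and on the
/// LLVM version.
///
///   int main(int argc, char **argv) {
///     llvm::InitializeNativeTarget();
///     llvm::InitializeNativeTargetAsmPrinter();
///     mlir::DialectRegistry registry;
///     registry.insert<mlir::LLVM::LLVMDialect>();
///     return mlir::JitRunnerMain(argc, argv, registry,
///                                mlir::JitRunnerConfig());
///   }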
int mlir::JitRunnerMain(int argc, char **argv, const DialectRegistry &registry,
                        JitRunnerConfig config) {
  // Create the options struct containing the command line options for the
  // runner. This must come before the command line options are parsed.
  Options options;
  llvm::cl::ParseCommandLineOptions(argc, argv, "MLIR CPU execution driver\n");

  Optional<unsigned> optLevel = getCommandLineOptLevel(options);
  SmallVector<std::reference_wrapper<llvm::cl::opt<bool>>, 4> optFlags{
      options.optO0, options.optO1, options.optO2, options.optO3};
  unsigned optCLIPosition = 0;
  // Determine if there is an optimization flag present, and its CLI position
  // (optCLIPosition).
  for (unsigned j = 0; j < 4; ++j) {
    auto &flag = optFlags[j].get();
    if (flag) {
      optCLIPosition = flag.getPosition();
      break;
    }
  }
  // Generate vector of pass information, plus the index at which we should
  // insert any optimization passes in that vector (optPosition).
  SmallVector<const llvm::PassInfo *, 4> passes;
  unsigned optPosition = 0;
  for (unsigned i = 0, e = options.llvmPasses.size(); i < e; ++i) {
    passes.push_back(options.llvmPasses[i]);
    if (optCLIPosition < options.llvmPasses.getPosition(i)) {
      optPosition = i;
      optCLIPosition = UINT_MAX; // To ensure we never insert again
    }
  }

  MLIRContext context(registry);

  auto m = parseMLIRInput(options.inputFilename, &context);
  if (!m) {
    llvm::errs() << "could not parse the input IR\n";
    return 1;
  }

  if (config.mlirTransformer)
    if (failed(config.mlirTransformer(m.get())))
      return EXIT_FAILURE;

  auto tmBuilderOrError = llvm::orc::JITTargetMachineBuilder::detectHost();
  if (!tmBuilderOrError) {
    llvm::errs() << "Failed to create a JITTargetMachineBuilder for the host\n";
    return EXIT_FAILURE;
  }
  auto tmOrError = tmBuilderOrError->createTargetMachine();
  if (!tmOrError) {
    llvm::errs() << "Failed to create a TargetMachine for the host\n";
    return EXIT_FAILURE;
  }

  auto transformer = mlir::makeLLVMPassesTransformer(
      passes, optLevel, /*targetMachine=*/tmOrError->get(), optPosition);

  CompileAndExecuteConfig compileAndExecuteConfig;
  compileAndExecuteConfig.transformer = transformer;
  compileAndExecuteConfig.llvmModuleBuilder = config.llvmModuleBuilder;
  compileAndExecuteConfig.runtimeSymbolMap = config.runtimesymbolMap;

  // Get the function used to compile and execute the module.
  using CompileAndExecuteFnT =
      Error (*)(Options &, ModuleOp, StringRef, CompileAndExecuteConfig);
  auto compileAndExecuteFn =
      StringSwitch<CompileAndExecuteFnT>(options.mainFuncType.getValue())
          .Case("i32", compileAndExecuteSingleReturnFunction<int32_t>)
          .Case("i64", compileAndExecuteSingleReturnFunction<int64_t>)
          .Case("f32", compileAndExecuteSingleReturnFunction<float>)
          .Case("void", compileAndExecuteVoidFunction)
          .Default(nullptr);

  Error error = compileAndExecuteFn
                    ? compileAndExecuteFn(options, m.get(),
                                          options.mainFuncName.getValue(),
                                          compileAndExecuteConfig)
                    : make_string_error("unsupported function type");

  int exitCode = EXIT_SUCCESS;
  llvm::handleAllErrors(std::move(error),
                        [&exitCode](const llvm::ErrorInfoBase &info) {
                          llvm::errs() << "Error: ";
                          info.log(llvm::errs());
                          llvm::errs() << '\n';
                          exitCode = EXIT_FAILURE;
                        });

  return exitCode;
}