//===- SPIRVConversion.cpp - SPIR-V Conversion Utilities ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities used to lower to SPIR-V dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"

#include <functional>

#define DEBUG_TYPE "mlir-spirv-conversion"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

/// Checks that the extension requirements in `candidates` can be satisfied by
/// the given `targetEnv`.
///
/// `candidates` is a vector of vectors of extension requirements, following the
/// ((Extension::A OR Extension::B) AND (Extension::C OR Extension::D))
/// convention.
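///
/// For example (illustrative), given candidates {{A, B}, {C}}, this check
/// succeeds only if the target environment allows (A or B) and also allows C.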
template <typename LabelT>
static LogicalResult checkExtensionRequirements(
    LabelT label, const spirv::TargetEnv &targetEnv,
    const spirv::SPIRVType::ExtensionArrayRefVector &candidates) {
  for (const auto &ors : candidates) {
    if (targetEnv.allows(ors))
      continue;

    SmallVector<StringRef, 4> extStrings;
    for (spirv::Extension ext : ors)
      extStrings.push_back(spirv::stringifyExtension(ext));

    LLVM_DEBUG(llvm::dbgs()
               << label << " illegal: requires at least one extension in ["
               << llvm::join(extStrings, ", ")
               << "] but none allowed in target environment\n");
    return failure();
  }
  return success();
}

/// Checks that the capability requirements in `candidates` can be satisfied by
/// the given `targetEnv`.
///
/// `candidates` is a vector of vectors of capability requirements, following
/// the ((Capability::A OR Capability::B) AND (Capability::C OR Capability::D))
/// convention.
template <typename LabelT>
static LogicalResult checkCapabilityRequirements(
    LabelT label, const spirv::TargetEnv &targetEnv,
    const spirv::SPIRVType::CapabilityArrayRefVector &candidates) {
  for (const auto &ors : candidates) {
    if (targetEnv.allows(ors))
      continue;

    SmallVector<StringRef, 4> capStrings;
    for (spirv::Capability cap : ors)
      capStrings.push_back(spirv::stringifyCapability(cap));

    LLVM_DEBUG(llvm::dbgs()
               << label << " illegal: requires at least one capability in ["
               << llvm::join(capStrings, ", ")
               << "] but none allowed in target environment\n");
    return failure();
  }
  return success();
}

//===----------------------------------------------------------------------===//
// Type Conversion
//===----------------------------------------------------------------------===//

Type SPIRVTypeConverter::getIndexType(MLIRContext *context) {
  // Convert to 32-bit integers for now. Might need a way to control this in
  // the future.
  // TODO: It is probably better to make it 64-bit integers. To do that, some
  // support is needed in the SPIR-V dialect for Conversion instructions. The
  // Vulkan spec requires builtins like GlobalInvocationID, etc. to be 32-bit
  // (unsigned) integers, which should then be sign-extended to 64-bit for
  // index computations.
  return IntegerType::get(context, 32);
}

/// Mapping from SPIR-V storage classes to memref memory spaces.
///
/// Note: memref does not have defined semantics for each memory space; the
/// meaning depends on the context where it is used. There are no particular
/// reasons behind the number assignments; we try to follow NVVM conventions
/// and largely give common storage classes smaller numbers. The hope is to use
/// a symbolic memory space representation eventually, once memref supports it.
// TODO: swap the Generic and StorageBuffer assignments to be more akin
// to NVVM.
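//
// As an illustrative example of the mapping below: a memref in memory space 0
// converts to a pointer in the StorageBuffer storage class, and one in memory
// space 3 converts to a pointer in the Workgroup storage class.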
#define STORAGE_SPACE_MAP_LIST(MAP_FN)                                         \
  MAP_FN(spirv::StorageClass::Generic, 1)                                      \
  MAP_FN(spirv::StorageClass::StorageBuffer, 0)                                \
  MAP_FN(spirv::StorageClass::Workgroup, 3)                                    \
  MAP_FN(spirv::StorageClass::Uniform, 4)                                      \
  MAP_FN(spirv::StorageClass::Private, 5)                                      \
  MAP_FN(spirv::StorageClass::Function, 6)                                     \
  MAP_FN(spirv::StorageClass::PushConstant, 7)                                 \
  MAP_FN(spirv::StorageClass::UniformConstant, 8)                              \
  MAP_FN(spirv::StorageClass::Input, 9)                                        \
  MAP_FN(spirv::StorageClass::Output, 10)                                      \
  MAP_FN(spirv::StorageClass::CrossWorkgroup, 11)                              \
  MAP_FN(spirv::StorageClass::AtomicCounter, 12)                               \
  MAP_FN(spirv::StorageClass::Image, 13)                                       \
  MAP_FN(spirv::StorageClass::CallableDataNV, 14)                              \
  MAP_FN(spirv::StorageClass::IncomingCallableDataNV, 15)                      \
  MAP_FN(spirv::StorageClass::RayPayloadNV, 16)                                \
  MAP_FN(spirv::StorageClass::HitAttributeNV, 17)                              \
  MAP_FN(spirv::StorageClass::IncomingRayPayloadNV, 18)                        \
  MAP_FN(spirv::StorageClass::ShaderRecordBufferNV, 19)                        \
  MAP_FN(spirv::StorageClass::PhysicalStorageBuffer, 20)

unsigned
SPIRVTypeConverter::getMemorySpaceForStorageClass(spirv::StorageClass storage) {
#define STORAGE_SPACE_MAP_FN(storage, space)                                   \
  case storage:                                                                \
    return space;

  switch (storage) { STORAGE_SPACE_MAP_LIST(STORAGE_SPACE_MAP_FN) }
#undef STORAGE_SPACE_MAP_FN
  llvm_unreachable("unhandled storage class!");
}

Optional<spirv::StorageClass>
SPIRVTypeConverter::getStorageClassForMemorySpace(unsigned space) {
#define STORAGE_SPACE_MAP_FN(storage, space)                                   \
  case space:                                                                  \
    return storage;

  switch (space) {
    STORAGE_SPACE_MAP_LIST(STORAGE_SPACE_MAP_FN)
  default:
    return llvm::None;
  }
#undef STORAGE_SPACE_MAP_FN
}

#undef STORAGE_SPACE_MAP_LIST

// TODO: This is a utility function that should probably be
// exposed by the SPIR-V dialect. Keeping it local till the use case arises.
static Optional<int64_t> getTypeNumBytes(Type t) {
  if (t.isa<spirv::ScalarType>()) {
    auto bitWidth = t.getIntOrFloatBitWidth();
    // According to the SPIR-V spec:
    // "There is no physical size or bit pattern defined for values with boolean
    // type. If they are stored (in conjunction with OpVariable), they can only
    // be used with logical addressing operations, not physical, and only with
    // non-externally visible shader Storage Classes: Workgroup, CrossWorkgroup,
    // Private, Function, Input, and Output."
    if (bitWidth == 1) {
      return llvm::None;
    }
    return bitWidth / 8;
  }
  if (auto vecType = t.dyn_cast<VectorType>()) {
    auto elementSize = getTypeNumBytes(vecType.getElementType());
    if (!elementSize)
      return llvm::None;
    return vecType.getNumElements() * *elementSize;
  }
  if (auto memRefType = t.dyn_cast<MemRefType>()) {
    // TODO: Layout should also be controlled by the ABI attributes. For now
    // using the layout from MemRef.
    int64_t offset;
    SmallVector<int64_t, 4> strides;
    if (!memRefType.hasStaticShape() ||
        failed(getStridesAndOffset(memRefType, strides, offset))) {
      return llvm::None;
    }
    // To get the size of the memref object in memory, the total size is
    // (offset + max(stride * dimension-size) over all dimensions) times the
    // size of the element.
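    // For example (illustrative), a static memref<2x3xf32> with the default
    // row-major layout has strides [3, 1] and offset 0, so its size is
    // (0 + max(2 * 3, 3 * 1)) * 4 = 24 bytes.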
    auto elementSize = getTypeNumBytes(memRefType.getElementType());
    if (!elementSize) {
      return llvm::None;
    }
    if (memRefType.getRank() == 0) {
      return elementSize;
    }
    auto dims = memRefType.getShape();
    if (llvm::is_contained(dims, ShapedType::kDynamicSize) ||
        offset == MemRefType::getDynamicStrideOrOffset() ||
        llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset())) {
      return llvm::None;
    }
    int64_t memrefSize = -1;
    for (auto shape : enumerate(dims)) {
      memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
    }
    return (offset + memrefSize) * elementSize.getValue();
  }
  if (auto tensorType = t.dyn_cast<TensorType>()) {
    if (!tensorType.hasStaticShape()) {
      return llvm::None;
    }
    auto elementSize = getTypeNumBytes(tensorType.getElementType());
    if (!elementSize) {
      return llvm::None;
    }
    int64_t size = elementSize.getValue();
    for (auto shape : tensorType.getShape()) {
      size *= shape;
    }
    return size;
  }
  // TODO: Add size computation for other types.
  return llvm::None;
}

Optional<int64_t> SPIRVTypeConverter::getConvertedTypeNumBytes(Type t) {
  return getTypeNumBytes(t);
}

/// Converts a scalar `type` to a suitable type under the given `targetEnv`.
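/// For example (illustrative), an i64 that is not supported by `targetEnv` is
/// converted to i32 with the same signedness, and an unsupported f16 is
/// converted to f32; supported types are returned unchanged.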
static Optional<Type>
convertScalarType(const spirv::TargetEnv &targetEnv, spirv::ScalarType type,
                  Optional<spirv::StorageClass> storageClass = {}) {
  // Get extension and capability requirements for the given type.
  SmallVector<ArrayRef<spirv::Extension>, 1> extensions;
  SmallVector<ArrayRef<spirv::Capability>, 2> capabilities;
  type.getExtensions(extensions, storageClass);
  type.getCapabilities(capabilities, storageClass);

  // If all requirements are met, then we can accept this type as-is.
  if (succeeded(checkCapabilityRequirements(type, targetEnv, capabilities)) &&
      succeeded(checkExtensionRequirements(type, targetEnv, extensions)))
    return type;

  // Otherwise we need to adjust the type, which really means adjusting the
  // bitwidth given this is a scalar type.
  // TODO: We are unconditionally converting the bitwidth here. This might be
  // okay for non-interface types (i.e., types used in Private/Function storage
  // classes), but not for interface types (i.e., types used in
  // StorageBuffer/Uniform/PushConstant/etc. storage classes), because the
  // latter actually affect the ABI contract with the runtime. So we may want
  // to expose a control on SPIRVTypeConverter to fail the conversion if we
  // cannot change the bitwidth there.

  if (auto floatType = type.dyn_cast<FloatType>()) {
    LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
    return Builder(targetEnv.getContext()).getF32Type();
  }

  auto intType = type.cast<IntegerType>();
  LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
  return IntegerType::get(targetEnv.getContext(), /*width=*/32,
                          intType.getSignedness());
}

/// Converts a vector `type` to a suitable type under the given `targetEnv`.
static Optional<Type>
convertVectorType(const spirv::TargetEnv &targetEnv, VectorType type,
                  Optional<spirv::StorageClass> storageClass = {}) {
  if (type.getRank() == 1 && type.getNumElements() == 1)
    return type.getElementType();

  if (!spirv::CompositeType::isValid(type)) {
    // TODO: Vector types with more than four elements can be translated into
    // array types.
    LLVM_DEBUG(llvm::dbgs() << type << " illegal: > 4-element unimplemented\n");
    return llvm::None;
  }

  // Get extension and capability requirements for the given type.
  SmallVector<ArrayRef<spirv::Extension>, 1> extensions;
  SmallVector<ArrayRef<spirv::Capability>, 2> capabilities;
  type.cast<spirv::CompositeType>().getExtensions(extensions, storageClass);
  type.cast<spirv::CompositeType>().getCapabilities(capabilities, storageClass);

  // If all requirements are met, then we can accept this type as-is.
  if (succeeded(checkCapabilityRequirements(type, targetEnv, capabilities)) &&
      succeeded(checkExtensionRequirements(type, targetEnv, extensions)))
    return type;

  auto elementType = convertScalarType(
      targetEnv, type.getElementType().cast<spirv::ScalarType>(), storageClass);
  if (elementType)
    return VectorType::get(type.getShape(), *elementType);
  return llvm::None;
}

/// Converts a tensor `type` to a suitable type under the given `targetEnv`.
///
/// Note that this is mainly for lowering constant tensors. In SPIR-V one can
/// create composite constants with OpConstantComposite to embed relatively
/// large constant values and use OpCompositeExtract and OpCompositeInsert to
/// manipulate them, as we do for vectors.
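///
/// For example (illustrative), a tensor<2x3xi32> converts to a
/// spirv::ArrayType of 6 i32 elements with a 4-byte stride, provided i32 is
/// supported by the target environment.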
static Optional<Type> convertTensorType(const spirv::TargetEnv &targetEnv,
                                        TensorType type) {
  // TODO: Handle dynamic shapes.
  if (!type.hasStaticShape()) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: dynamic shape unimplemented\n");
    return llvm::None;
  }

  auto scalarType = type.getElementType().dyn_cast<spirv::ScalarType>();
  if (!scalarType) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot convert non-scalar element type\n");
    return llvm::None;
  }

  Optional<int64_t> scalarSize = getTypeNumBytes(scalarType);
  Optional<int64_t> tensorSize = getTypeNumBytes(type);
  if (!scalarSize || !tensorSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce element count\n");
    return llvm::None;
  }

  auto arrayElemCount = *tensorSize / *scalarSize;
  auto arrayElemType = convertScalarType(targetEnv, scalarType);
  if (!arrayElemType)
    return llvm::None;
  Optional<int64_t> arrayElemSize = getTypeNumBytes(*arrayElemType);
  if (!arrayElemSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce converted element size\n");
    return llvm::None;
  }

  return spirv::ArrayType::get(*arrayElemType, arrayElemCount, *arrayElemSize);
}

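/// Converts a memref `type` to a suitable pointer type under the given
/// `targetEnv`. As an illustrative summary of the logic below: the element
/// type is converted to a scalar or vector SPIR-V type, wrapped in an array
/// (a runtime array for dynamic shapes) and then in a struct to satisfy Vulkan
/// interface requirements, and the result is a pointer to that struct in the
/// storage class corresponding to the memref's memory space.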
static Optional<Type> convertMemrefType(const spirv::TargetEnv &targetEnv,
                                        MemRefType type) {
  Optional<spirv::StorageClass> storageClass =
      SPIRVTypeConverter::getStorageClassForMemorySpace(
          type.getMemorySpaceAsInt());
  if (!storageClass) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot convert memory space\n");
    return llvm::None;
  }

  Optional<Type> arrayElemType;
  Type elementType = type.getElementType();
  if (auto vecType = elementType.dyn_cast<VectorType>()) {
    arrayElemType = convertVectorType(targetEnv, vecType, storageClass);
  } else if (auto scalarType = elementType.dyn_cast<spirv::ScalarType>()) {
    arrayElemType = convertScalarType(targetEnv, scalarType, storageClass);
  } else {
    LLVM_DEBUG(
        llvm::dbgs()
        << type
        << " unhandled: can only convert scalar or vector element type\n");
    return llvm::None;
  }
  if (!arrayElemType)
    return llvm::None;

  Optional<int64_t> elementSize = getTypeNumBytes(elementType);
  if (!elementSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce element size\n");
    return llvm::None;
  }

  if (!type.hasStaticShape()) {
    auto arrayType = spirv::RuntimeArrayType::get(*arrayElemType, *elementSize);
    // Wrap in a struct to satisfy Vulkan interface requirements.
    auto structType = spirv::StructType::get(arrayType, 0);
    return spirv::PointerType::get(structType, *storageClass);
  }

  Optional<int64_t> memrefSize = getTypeNumBytes(type);
  if (!memrefSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce element count\n");
    return llvm::None;
  }

  auto arrayElemCount = *memrefSize / *elementSize;

  Optional<int64_t> arrayElemSize = getTypeNumBytes(*arrayElemType);
  if (!arrayElemSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce converted element size\n");
    return llvm::None;
  }

  auto arrayType =
      spirv::ArrayType::get(*arrayElemType, arrayElemCount, *arrayElemSize);

  // Wrap in a struct to satisfy Vulkan interface requirements. Memrefs with
  // workgroup storage class do not need the struct to be laid out explicitly.
  auto structType = *storageClass == spirv::StorageClass::Workgroup
                        ? spirv::StructType::get(arrayType)
                        : spirv::StructType::get(arrayType, 0);
  return spirv::PointerType::get(structType, *storageClass);
}

SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr)
    : targetEnv(targetAttr) {
  // Add conversions. The order matters here: later ones will be tried earlier.

  // Fallback: if none of the following conversions apply, then this type
  // cannot be converted.
  addConversion([](Type type) { return llvm::None; });

  // Allow all SPIR-V dialect specific types. This assumes all builtin types
  // adopted in the SPIR-V dialect (i.e., IntegerType, FloatType, VectorType)
  // were tried before.
  //
  // TODO: this assumes that the SPIR-V types are valid to use in
  // the given target environment, which should be the case if the whole
  // pipeline is driven by the same target environment. Still, we probably
  // want to validate and convert to be safe.
  addConversion([](spirv::SPIRVType type) { return type; });

  addConversion([](IndexType indexType) {
    return SPIRVTypeConverter::getIndexType(indexType.getContext());
  });

  addConversion([this](IntegerType intType) -> Optional<Type> {
    if (auto scalarType = intType.dyn_cast<spirv::ScalarType>())
      return convertScalarType(targetEnv, scalarType);
    return llvm::None;
  });

  addConversion([this](FloatType floatType) -> Optional<Type> {
    if (auto scalarType = floatType.dyn_cast<spirv::ScalarType>())
      return convertScalarType(targetEnv, scalarType);
    return llvm::None;
  });

  addConversion([this](VectorType vectorType) {
    return convertVectorType(targetEnv, vectorType);
  });

  addConversion([this](TensorType tensorType) {
    return convertTensorType(targetEnv, tensorType);
  });

  addConversion([this](MemRefType memRefType) {
    return convertMemrefType(targetEnv, memRefType);
  });
}

//===----------------------------------------------------------------------===//
// FuncOp Conversion Patterns
//===----------------------------------------------------------------------===//

namespace {
/// A pattern for rewriting a function signature so that the arguments of the
/// function are of valid SPIR-V types.
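///
/// For example (illustrative), a `memref` argument becomes the corresponding
/// pointer type produced by the SPIRVTypeConverter, and the signature of the
/// resulting spv.func is updated accordingly.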
class FuncOpConversion final : public OpConversionPattern<FuncOp> {
public:
  using OpConversionPattern<FuncOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace

LogicalResult
FuncOpConversion::matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
                                  ConversionPatternRewriter &rewriter) const {
  auto fnType = funcOp.getType();
  if (fnType.getNumResults() > 1)
    return failure();

  TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
  for (auto argType : enumerate(fnType.getInputs())) {
    auto convertedType = getTypeConverter()->convertType(argType.value());
    if (!convertedType)
      return failure();
    signatureConverter.addInputs(argType.index(), convertedType);
  }

  Type resultType;
  if (fnType.getNumResults() == 1)
    resultType = getTypeConverter()->convertType(fnType.getResult(0));

  // Create the converted spv.func op.
  auto newFuncOp = rewriter.create<spirv::FuncOp>(
      funcOp.getLoc(), funcOp.getName(),
      rewriter.getFunctionType(signatureConverter.getConvertedTypes(),
                               resultType ? TypeRange(resultType)
                                          : TypeRange()));

  // Copy over all attributes other than the function name and type.
  for (const auto &namedAttr : funcOp->getAttrs()) {
    if (namedAttr.first != impl::getTypeAttrName() &&
        namedAttr.first != SymbolTable::getSymbolAttrName())
      newFuncOp->setAttr(namedAttr.first, namedAttr.second);
  }

  rewriter.inlineRegionBefore(funcOp.getBody(), newFuncOp.getBody(),
                              newFuncOp.end());
  if (failed(rewriter.convertRegionTypes(
          &newFuncOp.getBody(), *getTypeConverter(), &signatureConverter)))
    return failure();
  rewriter.eraseOp(funcOp);
  return success();
}

void mlir::populateBuiltinFuncToSPIRVPatterns(
    MLIRContext *context, SPIRVTypeConverter &typeConverter,
    OwningRewritePatternList &patterns) {
  patterns.insert<FuncOpConversion>(typeConverter, context);
}

//===----------------------------------------------------------------------===//
// Builtin Variables
//===----------------------------------------------------------------------===//

static spirv::GlobalVariableOp getBuiltinVariable(Block &body,
                                                  spirv::BuiltIn builtin) {
  // Look through all global variables in the given `body` block and check if
  // there is a spv.GlobalVariable that has the same `builtin` attribute.
  for (auto varOp : body.getOps<spirv::GlobalVariableOp>()) {
    if (auto builtinAttr = varOp->getAttrOfType<StringAttr>(
            spirv::SPIRVDialect::getAttributeName(
                spirv::Decoration::BuiltIn))) {
      auto varBuiltIn = spirv::symbolizeBuiltIn(builtinAttr.getValue());
      if (varBuiltIn && varBuiltIn.getValue() == builtin) {
        return varOp;
      }
    }
  }
  return nullptr;
}

/// Gets the name of the global variable for a builtin.
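/// For example, spirv::BuiltIn::GlobalInvocationId maps to
/// "__builtin_var_GlobalInvocationId__".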
static std::string getBuiltinVarName(spirv::BuiltIn builtin) {
  return std::string("__builtin_var_") + stringifyBuiltIn(builtin).str() + "__";
}

/// Gets or inserts a global variable for a builtin within `body` block.
static spirv::GlobalVariableOp
getOrInsertBuiltinVariable(Block &body, Location loc, spirv::BuiltIn builtin,
                           OpBuilder &builder) {
  if (auto varOp = getBuiltinVariable(body, builtin))
    return varOp;

  OpBuilder::InsertionGuard guard(builder);
  builder.setInsertionPointToStart(&body);

  spirv::GlobalVariableOp newVarOp;
  switch (builtin) {
  case spirv::BuiltIn::NumWorkgroups:
  case spirv::BuiltIn::WorkgroupSize:
  case spirv::BuiltIn::WorkgroupId:
  case spirv::BuiltIn::LocalInvocationId:
  case spirv::BuiltIn::GlobalInvocationId: {
    auto ptrType = spirv::PointerType::get(
        VectorType::get({3}, builder.getIntegerType(32)),
        spirv::StorageClass::Input);
    std::string name = getBuiltinVarName(builtin);
    newVarOp =
        builder.create<spirv::GlobalVariableOp>(loc, ptrType, name, builtin);
    break;
  }
  case spirv::BuiltIn::SubgroupId:
  case spirv::BuiltIn::NumSubgroups:
  case spirv::BuiltIn::SubgroupSize: {
    auto ptrType = spirv::PointerType::get(builder.getIntegerType(32),
                                           spirv::StorageClass::Input);
    std::string name = getBuiltinVarName(builtin);
    newVarOp =
        builder.create<spirv::GlobalVariableOp>(loc, ptrType, name, builtin);
    break;
  }
  default:
    emitError(loc, "unimplemented builtin variable generation for ")
        << stringifyBuiltIn(builtin);
  }
  return newVarOp;
}

Value mlir::spirv::getBuiltinVariableValue(Operation *op,
                                           spirv::BuiltIn builtin,
                                           OpBuilder &builder) {
  Operation *parent = SymbolTable::getNearestSymbolTable(op->getParentOp());
  if (!parent) {
    op->emitError("expected operation to be within a module-like op");
    return nullptr;
  }

  spirv::GlobalVariableOp varOp = getOrInsertBuiltinVariable(
      *parent->getRegion(0).begin(), op->getLoc(), builtin, builder);
  Value ptr = builder.create<spirv::AddressOfOp>(op->getLoc(), varOp);
  return builder.create<spirv::LoadOp>(op->getLoc(), ptr);
}

//===----------------------------------------------------------------------===//
// Index calculation
//===----------------------------------------------------------------------===//

Value mlir::spirv::linearizeIndex(ValueRange indices, ArrayRef<int64_t> strides,
                                  int64_t offset, Location loc,
                                  OpBuilder &builder) {
  assert(indices.size() == strides.size() &&
         "must provide indices for all dimensions");

  auto indexType = SPIRVTypeConverter::getIndexType(builder.getContext());

  // TODO: Consider moving to use affine.apply and patterns converting
  // affine.apply to standard ops. This needs the conversion-to-SPIR-V passes
  // to be broken down into progressive, small steps so that we can have
  // intermediate steps using other dialects. At the moment SPIR-V is the
  // final sink.
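  // The index is linearized as offset + sum(indices[i] * strides[i]). For
  // example (illustrative), with strides [4, 1] and offset 0, indices (i, j)
  // linearize to i * 4 + j.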

  Value linearizedIndex = builder.create<spirv::ConstantOp>(
      loc, indexType, IntegerAttr::get(indexType, offset));
  for (auto index : llvm::enumerate(indices)) {
    Value strideVal = builder.create<spirv::ConstantOp>(
        loc, indexType, IntegerAttr::get(indexType, strides[index.index()]));
    Value update = builder.create<spirv::IMulOp>(loc, strideVal, index.value());
    linearizedIndex =
        builder.create<spirv::IAddOp>(loc, linearizedIndex, update);
  }
  return linearizedIndex;
}

spirv::AccessChainOp mlir::spirv::getElementPtr(
    SPIRVTypeConverter &typeConverter, MemRefType baseType, Value basePtr,
    ValueRange indices, Location loc, OpBuilder &builder) {
  // Get the strides and offset of the MemRefType and verify they are static.

  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(baseType, strides, offset)) ||
      llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) ||
      offset == MemRefType::getDynamicStrideOrOffset()) {
    return nullptr;
  }

  auto indexType = typeConverter.getIndexType(builder.getContext());

  SmallVector<Value, 2> linearizedIndices;
  auto zero = spirv::ConstantOp::getZero(indexType, loc, builder);

  // Add a '0' at the start to index into the struct.
  linearizedIndices.push_back(zero);

  if (baseType.getRank() == 0) {
    linearizedIndices.push_back(zero);
  } else {
    linearizedIndices.push_back(
        linearizeIndex(indices, strides, offset, loc, builder));
  }
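  // The resulting access chain thus has two indices: the leading zero selects
  // the array member inside the wrapping struct, and the second index selects
  // the element within that array (for a rank-0 memref both indices are zero).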
  return builder.create<spirv::AccessChainOp>(loc, basePtr, linearizedIndices);
}

//===----------------------------------------------------------------------===//
// Set ABI attributes for lowering entry functions.
//===----------------------------------------------------------------------===//

LogicalResult
mlir::spirv::setABIAttrs(spirv::FuncOp funcOp,
                         spirv::EntryPointABIAttr entryPointInfo,
                         ArrayRef<spirv::InterfaceVarABIAttr> argABIInfo) {
  // Set the attributes for the arguments and the function.
  StringRef argABIAttrName = spirv::getInterfaceVarABIAttrName();
  for (auto argIndex : llvm::seq<unsigned>(0, argABIInfo.size())) {
    funcOp.setArgAttr(argIndex, argABIAttrName, argABIInfo[argIndex]);
  }
  funcOp->setAttr(spirv::getEntryPointABIAttrName(), entryPointInfo);
  return success();
}

//===----------------------------------------------------------------------===//
// SPIR-V ConversionTarget
//===----------------------------------------------------------------------===//

std::unique_ptr<spirv::SPIRVConversionTarget>
spirv::SPIRVConversionTarget::get(spirv::TargetEnvAttr targetAttr) {
  std::unique_ptr<SPIRVConversionTarget> target(
      // std::make_unique does not work here because the constructor is private.
      new SPIRVConversionTarget(targetAttr));
  SPIRVConversionTarget *targetPtr = target.get();
  target->addDynamicallyLegalDialect<SPIRVDialect>(
      // We need to capture the raw pointer here because it is stable: the
      // `target` local variable will go out of scope when this function
      // returns, but the object it points to stays alive.
      [targetPtr](Operation *op) { return targetPtr->isLegalOp(op); });
  return target;
}

spirv::SPIRVConversionTarget::SPIRVConversionTarget(
    spirv::TargetEnvAttr targetAttr)
    : ConversionTarget(*targetAttr.getContext()), targetEnv(targetAttr) {}

bool spirv::SPIRVConversionTarget::isLegalOp(Operation *op) {
  // Make sure this op is available at the given version. Ops not implementing
  // QueryMinVersionInterface/QueryMaxVersionInterface are available to all
  // SPIR-V versions.
  if (auto minVersion = dyn_cast<spirv::QueryMinVersionInterface>(op))
    if (minVersion.getMinVersion() > this->targetEnv.getVersion()) {
      LLVM_DEBUG(llvm::dbgs()
                 << op->getName() << " illegal: requiring min version "
                 << spirv::stringifyVersion(minVersion.getMinVersion())
                 << "\n");
      return false;
    }
  if (auto maxVersion = dyn_cast<spirv::QueryMaxVersionInterface>(op))
    if (maxVersion.getMaxVersion() < this->targetEnv.getVersion()) {
      LLVM_DEBUG(llvm::dbgs()
                 << op->getName() << " illegal: requiring max version "
                 << spirv::stringifyVersion(maxVersion.getMaxVersion())
                 << "\n");
      return false;
    }

  // Make sure this op's required extensions are allowed by the target
  // environment. Ops not implementing QueryExtensionInterface do not require
  // extensions to be available.
  if (auto extensions = dyn_cast<spirv::QueryExtensionInterface>(op))
    if (failed(checkExtensionRequirements(op->getName(), this->targetEnv,
                                          extensions.getExtensions())))
      return false;

  // Make sure this op's required capabilities are allowed by the target
  // environment. Ops not implementing QueryCapabilityInterface do not require
  // capabilities to be available.
  if (auto capabilities = dyn_cast<spirv::QueryCapabilityInterface>(op))
    if (failed(checkCapabilityRequirements(op->getName(), this->targetEnv,
                                           capabilities.getCapabilities())))
      return false;

  SmallVector<Type, 4> valueTypes;
  valueTypes.append(op->operand_type_begin(), op->operand_type_end());
  valueTypes.append(op->result_type_begin(), op->result_type_end());

  // Special treatment for global variables, whose type requirements are
  // conveyed by type attributes.
  if (auto globalVar = dyn_cast<spirv::GlobalVariableOp>(op))
    valueTypes.push_back(globalVar.type());

  // Make sure the op's operands/results use types that are allowed by the
  // target environment.
  SmallVector<ArrayRef<spirv::Extension>, 4> typeExtensions;
  SmallVector<ArrayRef<spirv::Capability>, 8> typeCapabilities;
  for (Type valueType : valueTypes) {
    typeExtensions.clear();
    valueType.cast<spirv::SPIRVType>().getExtensions(typeExtensions);
    if (failed(checkExtensionRequirements(op->getName(), this->targetEnv,
                                          typeExtensions)))
      return false;

    typeCapabilities.clear();
    valueType.cast<spirv::SPIRVType>().getCapabilities(typeCapabilities);
    if (failed(checkCapabilityRequirements(op->getName(), this->targetEnv,
                                           typeCapabilities)))
      return false;
  }

  return true;
}