//===- SPIRVConversion.cpp - SPIR-V Conversion Utilities ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities used to lower to SPIR-V dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVOps.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"

#include <functional>

#define DEBUG_TYPE "mlir-spirv-conversion"

using namespace mlir;

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

/// Checks that the extension requirements in `candidates` can be satisfied by
/// the given `targetEnv`.
///
/// `candidates` is a vector of vectors of extension requirements, following the
/// ((Extension::A OR Extension::B) AND (Extension::C OR Extension::D))
/// convention.
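///
/// As an illustrative sketch (the concrete extensions named here are only an
/// assumed example, not something this function mandates): with
///   candidates = {{Extension::SPV_KHR_16bit_storage},
///                 {Extension::SPV_KHR_8bit_storage}}
/// the target environment must allow both SPV_KHR_16bit_storage AND
/// SPV_KHR_8bit_storage, whereas with
///   candidates = {{Extension::SPV_KHR_16bit_storage,
///                  Extension::SPV_KHR_8bit_storage}}
/// allowing either one of the two is sufficient.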
template <typename LabelT>
static LogicalResult checkExtensionRequirements(
    LabelT label, const spirv::TargetEnv &targetEnv,
    const spirv::SPIRVType::ExtensionArrayRefVector &candidates) {
  for (const auto &ors : candidates) {
    if (targetEnv.allows(ors))
      continue;

    SmallVector<StringRef, 4> extStrings;
    for (spirv::Extension ext : ors)
      extStrings.push_back(spirv::stringifyExtension(ext));

    LLVM_DEBUG(llvm::dbgs()
               << label << " illegal: requires at least one extension in ["
               << llvm::join(extStrings, ", ")
               << "] but none allowed in target environment\n");
    return failure();
  }
  return success();
}

/// Checks that the capability requirements in `candidates` can be satisfied by
/// the given `targetEnv`.
///
/// `candidates` is a vector of vectors of capability requirements, following
/// the ((Capability::A OR Capability::B) AND (Capability::C OR Capability::D))
/// convention.
template <typename LabelT>
static LogicalResult checkCapabilityRequirements(
    LabelT label, const spirv::TargetEnv &targetEnv,
    const spirv::SPIRVType::CapabilityArrayRefVector &candidates) {
  for (const auto &ors : candidates) {
    if (targetEnv.allows(ors))
      continue;

    SmallVector<StringRef, 4> capStrings;
    for (spirv::Capability cap : ors)
      capStrings.push_back(spirv::stringifyCapability(cap));

    LLVM_DEBUG(llvm::dbgs()
               << label << " illegal: requires at least one capability in ["
               << llvm::join(capStrings, ", ")
               << "] but none allowed in target environment\n");
    return failure();
  }
  return success();
}

//===----------------------------------------------------------------------===//
// Type Conversion
//===----------------------------------------------------------------------===//

Type SPIRVTypeConverter::getIndexType(MLIRContext *context) {
  // Convert to 32-bit integers for now. We might need a way to control this in
  // the future.
  // TODO: It is probably better to make it 64-bit integers. To do this, some
  // support is needed in the SPIR-V dialect for conversion instructions. The
  // Vulkan spec requires builtins like GlobalInvocationID, etc. to be 32-bit
  // (unsigned) integers, which would then need to be sign-extended to 64-bit
  // for index computations.
  return IntegerType::get(context, 32);
}

/// Mapping from SPIR-V storage classes to memref memory spaces.
///
/// Note: memref does not have defined semantics for each memory space; it
/// depends on the context where it is used. There are no particular reasons
/// behind the number assignments; we try to follow NVVM conventions and largely
/// give common storage classes smaller numbers. The hope is to use a symbolic
/// memory space representation eventually, once memref supports it.
// TODO: swap the Generic and StorageBuffer assignments to be more akin
// to NVVM.
#define STORAGE_SPACE_MAP_LIST(MAP_FN)                                         \
  MAP_FN(spirv::StorageClass::Generic, 1)                                      \
  MAP_FN(spirv::StorageClass::StorageBuffer, 0)                                \
  MAP_FN(spirv::StorageClass::Workgroup, 3)                                    \
  MAP_FN(spirv::StorageClass::Uniform, 4)                                      \
  MAP_FN(spirv::StorageClass::Private, 5)                                      \
  MAP_FN(spirv::StorageClass::Function, 6)                                     \
  MAP_FN(spirv::StorageClass::PushConstant, 7)                                 \
  MAP_FN(spirv::StorageClass::UniformConstant, 8)                              \
  MAP_FN(spirv::StorageClass::Input, 9)                                        \
  MAP_FN(spirv::StorageClass::Output, 10)                                      \
  MAP_FN(spirv::StorageClass::CrossWorkgroup, 11)                              \
  MAP_FN(spirv::StorageClass::AtomicCounter, 12)                               \
  MAP_FN(spirv::StorageClass::Image, 13)                                       \
  MAP_FN(spirv::StorageClass::CallableDataNV, 14)                              \
  MAP_FN(spirv::StorageClass::IncomingCallableDataNV, 15)                      \
  MAP_FN(spirv::StorageClass::RayPayloadNV, 16)                                \
  MAP_FN(spirv::StorageClass::HitAttributeNV, 17)                              \
  MAP_FN(spirv::StorageClass::IncomingRayPayloadNV, 18)                        \
  MAP_FN(spirv::StorageClass::ShaderRecordBufferNV, 19)                        \
  MAP_FN(spirv::StorageClass::PhysicalStorageBuffer, 20)
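
// For example, under this mapping a memref in memory space 3, e.g.
// `memref<16xf32, 3>`, is treated as living in the Workgroup storage class,
// and getMemorySpaceForStorageClass(spirv::StorageClass::StorageBuffer)
// returns memory space 0.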

unsigned
SPIRVTypeConverter::getMemorySpaceForStorageClass(spirv::StorageClass storage) {
#define STORAGE_SPACE_MAP_FN(storage, space)                                   \
  case storage:                                                                \
    return space;

  switch (storage) { STORAGE_SPACE_MAP_LIST(STORAGE_SPACE_MAP_FN) }
#undef STORAGE_SPACE_MAP_FN
  llvm_unreachable("unhandled storage class!");
}

Optional<spirv::StorageClass>
SPIRVTypeConverter::getStorageClassForMemorySpace(unsigned space) {
#define STORAGE_SPACE_MAP_FN(storage, space)                                   \
  case space:                                                                  \
    return storage;

  switch (space) {
    STORAGE_SPACE_MAP_LIST(STORAGE_SPACE_MAP_FN)
  default:
    return llvm::None;
  }
#undef STORAGE_SPACE_MAP_FN
}

#undef STORAGE_SPACE_MAP_LIST

// TODO: This is a utility function that should probably be
// exposed by the SPIR-V dialect. Keeping it local till the use case arises.
static Optional<int64_t> getTypeNumBytes(Type t) {
  if (t.isa<spirv::ScalarType>()) {
    auto bitWidth = t.getIntOrFloatBitWidth();
    // According to the SPIR-V spec:
    // "There is no physical size or bit pattern defined for values with boolean
    // type. If they are stored (in conjunction with OpVariable), they can only
    // be used with logical addressing operations, not physical, and only with
    // non-externally visible shader Storage Classes: Workgroup, CrossWorkgroup,
    // Private, Function, Input, and Output."
    if (bitWidth == 1) {
      return llvm::None;
    }
    return bitWidth / 8;
  }
  if (auto vecType = t.dyn_cast<VectorType>()) {
    auto elementSize = getTypeNumBytes(vecType.getElementType());
    if (!elementSize)
      return llvm::None;
    return vecType.getNumElements() * *elementSize;
  }
  if (auto memRefType = t.dyn_cast<MemRefType>()) {
    // TODO: Layout should also be controlled by the ABI attributes. For now
    // using the layout from MemRef.
    int64_t offset;
    SmallVector<int64_t, 4> strides;
    if (!memRefType.hasStaticShape() ||
        failed(getStridesAndOffset(memRefType, strides, offset))) {
      return llvm::None;
    }
    // To get the size of the memref object in memory, the total size is the
    // max(stride * dimension-size) computed for all dimensions times the size
    // of the element.
    auto elementSize = getTypeNumBytes(memRefType.getElementType());
    if (!elementSize) {
      return llvm::None;
    }
    if (memRefType.getRank() == 0) {
      return elementSize;
    }
    auto dims = memRefType.getShape();
    if (llvm::is_contained(dims, ShapedType::kDynamicSize) ||
        offset == MemRefType::getDynamicStrideOrOffset() ||
        llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset())) {
      return llvm::None;
    }
    int64_t memrefSize = -1;
    for (auto shape : enumerate(dims)) {
      memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
    }
    return (offset + memrefSize) * elementSize.getValue();
  } else if (auto tensorType = t.dyn_cast<TensorType>()) {
    if (!tensorType.hasStaticShape()) {
      return llvm::None;
    }
    auto elementSize = getTypeNumBytes(tensorType.getElementType());
    if (!elementSize) {
      return llvm::None;
    }
    int64_t size = elementSize.getValue();
    for (auto shape : tensorType.getShape()) {
      size *= shape;
    }
    return size;
  }
  // TODO: Add size computation for other types.
  return llvm::None;
}
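
// As a worked example of the memref byte-size computation above (a sketch,
// assuming the default identity layout): for `memref<2x3xf32>` the strides are
// [3, 1] and the offset is 0, so memrefSize = max(2 * 3, 3 * 1) = 6 and the
// returned size is (0 + 6) * 4 = 24 bytes.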

Optional<int64_t> SPIRVTypeConverter::getConvertedTypeNumBytes(Type t) {
  return getTypeNumBytes(t);
}

/// Converts a scalar `type` to a suitable type under the given `targetEnv`.
static Optional<Type>
convertScalarType(const spirv::TargetEnv &targetEnv, spirv::ScalarType type,
                  Optional<spirv::StorageClass> storageClass = {}) {
  // Get extension and capability requirements for the given type.
  SmallVector<ArrayRef<spirv::Extension>, 1> extensions;
  SmallVector<ArrayRef<spirv::Capability>, 2> capabilities;
  type.getExtensions(extensions, storageClass);
  type.getCapabilities(capabilities, storageClass);

  // If all requirements are met, then we can accept this type as-is.
  if (succeeded(checkCapabilityRequirements(type, targetEnv, capabilities)) &&
      succeeded(checkExtensionRequirements(type, targetEnv, extensions)))
    return type;

  // Otherwise we need to adjust the type, which really means adjusting the
  // bitwidth given this is a scalar type.
  // TODO: We are unconditionally converting the bitwidth here. This might be
  // okay for non-interface types (i.e., types used in Private/Function storage
  // classes), but not for interface types (i.e., types used in
  // StorageBuffer/Uniform/PushConstant/etc. storage classes), because the
  // latter actually affect the ABI contract with the runtime. So we may want
  // to expose a control on SPIRVTypeConverter to fail the conversion if we
  // cannot change the bitwidth there.

  if (auto floatType = type.dyn_cast<FloatType>()) {
    LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
    return Builder(targetEnv.getContext()).getF32Type();
  }

  auto intType = type.cast<IntegerType>();
  LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
  return IntegerType::get(targetEnv.getContext(), /*width=*/32,
                          intType.getSignedness());
}
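
// For instance, if the target environment does not allow the Float64
// capability, an f64 handled by convertScalarType above is rewritten to f32;
// likewise an i64 without the Int64 capability becomes an i32 with the same
// signedness. (Illustration only; the exact requirements per bitwidth are
// defined by spirv::ScalarType.)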

/// Converts a vector `type` to a suitable type under the given `targetEnv`.
static Optional<Type>
convertVectorType(const spirv::TargetEnv &targetEnv, VectorType type,
                  Optional<spirv::StorageClass> storageClass = {}) {
  if (type.getRank() == 1 && type.getNumElements() == 1)
    return type.getElementType();

  if (!spirv::CompositeType::isValid(type)) {
    // TODO: Vector types with more than four elements can be translated into
    // array types.
    LLVM_DEBUG(llvm::dbgs() << type << " illegal: > 4-element unimplemented\n");
    return llvm::None;
  }

  // Get extension and capability requirements for the given type.
  SmallVector<ArrayRef<spirv::Extension>, 1> extensions;
  SmallVector<ArrayRef<spirv::Capability>, 2> capabilities;
  type.cast<spirv::CompositeType>().getExtensions(extensions, storageClass);
  type.cast<spirv::CompositeType>().getCapabilities(capabilities, storageClass);

  // If all requirements are met, then we can accept this type as-is.
  if (succeeded(checkCapabilityRequirements(type, targetEnv, capabilities)) &&
      succeeded(checkExtensionRequirements(type, targetEnv, extensions)))
    return type;

  auto elementType = convertScalarType(
      targetEnv, type.getElementType().cast<spirv::ScalarType>(), storageClass);
  if (elementType)
    return VectorType::get(type.getShape(), *elementType);
  return llvm::None;
}
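
// Illustrative examples for the vector conversion above (assuming a target
// environment without 16-bit float support): `vector<1xf32>` folds to plain
// `f32`, `vector<4xf16>` becomes `vector<4xf32>`, and `vector<8xf32>` is
// rejected because, as the TODO above notes, longer vectors are not yet
// translated into arrays.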

/// Converts a tensor `type` to a suitable type under the given `targetEnv`.
///
/// Note that this is mainly for lowering constant tensors. In SPIR-V one can
/// create composite constants with OpConstantComposite to embed relatively
/// large constant values and use OpCompositeExtract and OpCompositeInsert to
/// manipulate them, as we do for vectors.
static Optional<Type> convertTensorType(const spirv::TargetEnv &targetEnv,
                                        TensorType type) {
  // TODO: Handle dynamic shapes.
  if (!type.hasStaticShape()) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: dynamic shape unimplemented\n");
    return llvm::None;
  }

  auto scalarType = type.getElementType().dyn_cast<spirv::ScalarType>();
  if (!scalarType) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot convert non-scalar element type\n");
    return llvm::None;
  }

  Optional<int64_t> scalarSize = getTypeNumBytes(scalarType);
  Optional<int64_t> tensorSize = getTypeNumBytes(type);
  if (!scalarSize || !tensorSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce element count\n");
    return llvm::None;
  }

  auto arrayElemCount = *tensorSize / *scalarSize;
  auto arrayElemType = convertScalarType(targetEnv, scalarType);
  if (!arrayElemType)
    return llvm::None;
  Optional<int64_t> arrayElemSize = getTypeNumBytes(*arrayElemType);
  if (!arrayElemSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce converted element size\n");
    return llvm::None;
  }

  return spirv::ArrayType::get(*arrayElemType, arrayElemCount, *arrayElemSize);
}
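
// For example, the tensor conversion above maps `tensor<2x3xi32>` (24 bytes,
// 4-byte elements) to a 6-element array with a 4-byte stride, i.e. roughly
// `!spv.array<6 x i32, stride=4>` in assembly form (a sketch, assuming i32
// needs no bitwidth adjustment in the target environment).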

static Optional<Type> convertMemrefType(const spirv::TargetEnv &targetEnv,
                                        MemRefType type) {
  Optional<spirv::StorageClass> storageClass =
      SPIRVTypeConverter::getStorageClassForMemorySpace(type.getMemorySpace());
  if (!storageClass) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot convert memory space\n");
    return llvm::None;
  }

  Optional<Type> arrayElemType;
  Type elementType = type.getElementType();
  if (auto vecType = elementType.dyn_cast<VectorType>()) {
    arrayElemType = convertVectorType(targetEnv, vecType, storageClass);
  } else if (auto scalarType = elementType.dyn_cast<spirv::ScalarType>()) {
    arrayElemType = convertScalarType(targetEnv, scalarType, storageClass);
  } else {
    LLVM_DEBUG(
        llvm::dbgs()
        << type
        << " unhandled: can only convert scalar or vector element type\n");
    return llvm::None;
  }
  if (!arrayElemType)
    return llvm::None;

  Optional<int64_t> elementSize = getTypeNumBytes(elementType);
  if (!elementSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce element size\n");
    return llvm::None;
  }

  if (!type.hasStaticShape()) {
    auto arrayType = spirv::RuntimeArrayType::get(*arrayElemType, *elementSize);
    // Wrap in a struct to satisfy Vulkan interface requirements.
    auto structType = spirv::StructType::get(arrayType, 0);
    return spirv::PointerType::get(structType, *storageClass);
  }

  Optional<int64_t> memrefSize = getTypeNumBytes(type);
  if (!memrefSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce element count\n");
    return llvm::None;
  }

  auto arrayElemCount = *memrefSize / *elementSize;

  Optional<int64_t> arrayElemSize = getTypeNumBytes(*arrayElemType);
  if (!arrayElemSize) {
    LLVM_DEBUG(llvm::dbgs()
               << type << " illegal: cannot deduce converted element size\n");
    return llvm::None;
  }

  auto arrayType =
      spirv::ArrayType::get(*arrayElemType, arrayElemCount, *arrayElemSize);

  // Wrap in a struct to satisfy Vulkan interface requirements. Memrefs with
  // workgroup storage class do not need the struct to be laid out explicitly.
  auto structType = *storageClass == spirv::StorageClass::Workgroup
                        ? spirv::StructType::get(arrayType)
                        : spirv::StructType::get(arrayType, 0);
  return spirv::PointerType::get(structType, *storageClass);
}
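
// Putting the pieces above together (a sketch; the exact assembly syntax may
// differ): a static `memref<10xf32>` in memory space 0 (StorageBuffer) becomes
// a pointer to a struct wrapping an explicitly laid out array, roughly
// `!spv.ptr<!spv.struct<!spv.array<10 x f32, stride=4> [0]>, StorageBuffer>`,
// while a dynamically shaped memref becomes a pointer to a struct wrapping a
// runtime array instead.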

SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr)
    : targetEnv(targetAttr) {
  // Add conversions. The order matters here: later ones will be tried earlier.

  // Fallback: if none of the other conversions apply, we cannot convert this
  // type.
  addConversion([](Type type) { return llvm::None; });

  // Allow all SPIR-V dialect specific types. This assumes all builtin types
  // adopted in the SPIR-V dialect (i.e., IntegerType, FloatType, VectorType)
  // were tried before.
  //
  // TODO: this assumes that the SPIR-V types are valid to use in
  // the given target environment, which should be the case if the whole
  // pipeline is driven by the same target environment. Still, we probably
  // want to validate and convert to be safe.
  addConversion([](spirv::SPIRVType type) { return type; });

  addConversion([](IndexType indexType) {
    return SPIRVTypeConverter::getIndexType(indexType.getContext());
  });

  addConversion([this](IntegerType intType) -> Optional<Type> {
    if (auto scalarType = intType.dyn_cast<spirv::ScalarType>())
      return convertScalarType(targetEnv, scalarType);
    return llvm::None;
  });

  addConversion([this](FloatType floatType) -> Optional<Type> {
    if (auto scalarType = floatType.dyn_cast<spirv::ScalarType>())
      return convertScalarType(targetEnv, scalarType);
    return llvm::None;
  });

  addConversion([this](VectorType vectorType) {
    return convertVectorType(targetEnv, vectorType);
  });

  addConversion([this](TensorType tensorType) {
    return convertTensorType(targetEnv, tensorType);
  });

  addConversion([this](MemRefType memRefType) {
    return convertMemrefType(targetEnv, memRefType);
  });
}
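
// A minimal usage sketch of the type converter set up above (the surrounding
// pass boilerplate and the `op` variable are assumed for illustration):
//
//   spirv::TargetEnvAttr targetAttr = spirv::lookupTargetEnvOrDefault(op);
//   SPIRVTypeConverter typeConverter(targetAttr);
//   // Index lowers to i32; memrefs lower to pointers to (runtime) arrays.
//   Type converted =
//       typeConverter.convertType(IndexType::get(op->getContext()));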

//===----------------------------------------------------------------------===//
// FuncOp Conversion Patterns
//===----------------------------------------------------------------------===//

namespace {
/// A pattern for rewriting a function signature so that the function arguments
/// get converted to valid SPIR-V types.
class FuncOpConversion final : public OpConversionPattern<FuncOp> {
public:
  using OpConversionPattern<FuncOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override;
};
} // namespace

LogicalResult
FuncOpConversion::matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
                                  ConversionPatternRewriter &rewriter) const {
  auto fnType = funcOp.getType();
  if (fnType.getNumResults() > 1)
    return failure();

  TypeConverter::SignatureConversion signatureConverter(fnType.getNumInputs());
  for (auto argType : enumerate(fnType.getInputs())) {
    auto convertedType = getTypeConverter()->convertType(argType.value());
    if (!convertedType)
      return failure();
    signatureConverter.addInputs(argType.index(), convertedType);
  }

  Type resultType;
  if (fnType.getNumResults() == 1)
    resultType = getTypeConverter()->convertType(fnType.getResult(0));

  // Create the converted spv.func op.
  auto newFuncOp = rewriter.create<spirv::FuncOp>(
      funcOp.getLoc(), funcOp.getName(),
      rewriter.getFunctionType(signatureConverter.getConvertedTypes(),
                               resultType ? TypeRange(resultType)
                                          : TypeRange()));

  // Copy over all attributes other than the function name and type.
  for (const auto &namedAttr : funcOp.getAttrs()) {
    if (namedAttr.first != impl::getTypeAttrName() &&
        namedAttr.first != SymbolTable::getSymbolAttrName())
      newFuncOp->setAttr(namedAttr.first, namedAttr.second);
  }

  rewriter.inlineRegionBefore(funcOp.getBody(), newFuncOp.getBody(),
                              newFuncOp.end());
  if (failed(rewriter.convertRegionTypes(
          &newFuncOp.getBody(), *getTypeConverter(), &signatureConverter)))
    return failure();
  rewriter.eraseOp(funcOp);
  return success();
}

void mlir::populateBuiltinFuncToSPIRVPatterns(
    MLIRContext *context, SPIRVTypeConverter &typeConverter,
    OwningRewritePatternList &patterns) {
  patterns.insert<FuncOpConversion>(typeConverter, context);
}
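
// A sketch of how a conversion pass might drive the pattern registered above
// (the `module` and `context` variables are assumptions for illustration):
//
//   spirv::TargetEnvAttr targetAttr = spirv::lookupTargetEnvOrDefault(module);
//   SPIRVTypeConverter typeConverter(targetAttr);
//   OwningRewritePatternList patterns;
//   populateBuiltinFuncToSPIRVPatterns(context, typeConverter, patterns);
//   auto target = spirv::SPIRVConversionTarget::get(targetAttr);
//   if (failed(applyPartialConversion(module, *target, std::move(patterns))))
//     signalPassFailure();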

//===----------------------------------------------------------------------===//
// Builtin Variables
//===----------------------------------------------------------------------===//

static spirv::GlobalVariableOp getBuiltinVariable(Block &body,
                                                  spirv::BuiltIn builtin) {
  // Look through all global variables in the given `body` block and check if
  // there is a spv.globalVariable that has the same `builtin` attribute.
  for (auto varOp : body.getOps<spirv::GlobalVariableOp>()) {
    if (auto builtinAttr = varOp->getAttrOfType<StringAttr>(
            spirv::SPIRVDialect::getAttributeName(
                spirv::Decoration::BuiltIn))) {
      auto varBuiltIn = spirv::symbolizeBuiltIn(builtinAttr.getValue());
      if (varBuiltIn && varBuiltIn.getValue() == builtin) {
        return varOp;
      }
    }
  }
  return nullptr;
}

/// Gets the name of the global variable for a builtin.
static std::string getBuiltinVarName(spirv::BuiltIn builtin) {
  return std::string("__builtin_var_") + stringifyBuiltIn(builtin).str() + "__";
}
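
// For example, getBuiltinVarName(spirv::BuiltIn::GlobalInvocationId) produces
// "__builtin_var_GlobalInvocationId__".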

/// Gets or inserts a global variable for a builtin within `body` block.
static spirv::GlobalVariableOp
getOrInsertBuiltinVariable(Block &body, Location loc, spirv::BuiltIn builtin,
                           OpBuilder &builder) {
  if (auto varOp = getBuiltinVariable(body, builtin))
    return varOp;

  OpBuilder::InsertionGuard guard(builder);
  builder.setInsertionPointToStart(&body);

  spirv::GlobalVariableOp newVarOp;
  switch (builtin) {
  case spirv::BuiltIn::NumWorkgroups:
  case spirv::BuiltIn::WorkgroupSize:
  case spirv::BuiltIn::WorkgroupId:
  case spirv::BuiltIn::LocalInvocationId:
  case spirv::BuiltIn::GlobalInvocationId: {
    auto ptrType = spirv::PointerType::get(
        VectorType::get({3}, builder.getIntegerType(32)),
        spirv::StorageClass::Input);
    std::string name = getBuiltinVarName(builtin);
    newVarOp =
        builder.create<spirv::GlobalVariableOp>(loc, ptrType, name, builtin);
    break;
  }
  case spirv::BuiltIn::SubgroupId:
  case spirv::BuiltIn::NumSubgroups:
  case spirv::BuiltIn::SubgroupSize: {
    auto ptrType = spirv::PointerType::get(builder.getIntegerType(32),
                                           spirv::StorageClass::Input);
    std::string name = getBuiltinVarName(builtin);
    newVarOp =
        builder.create<spirv::GlobalVariableOp>(loc, ptrType, name, builtin);
    break;
  }
  default:
    emitError(loc, "unimplemented builtin variable generation for ")
        << stringifyBuiltIn(builtin);
  }
  return newVarOp;
}

Value mlir::spirv::getBuiltinVariableValue(Operation *op,
                                           spirv::BuiltIn builtin,
                                           OpBuilder &builder) {
  Operation *parent = SymbolTable::getNearestSymbolTable(op->getParentOp());
  if (!parent) {
    op->emitError("expected operation to be within a module-like op");
    return nullptr;
  }

  spirv::GlobalVariableOp varOp = getOrInsertBuiltinVariable(
      *parent->getRegion(0).begin(), op->getLoc(), builtin, builder);
  Value ptr = builder.create<spirv::AddressOfOp>(op->getLoc(), varOp);
  return builder.create<spirv::LoadOp>(op->getLoc(), ptr);
}
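
// An illustrative usage sketch (`op` and `rewriter` are assumed to come from
// the surrounding lowering pattern):
//
//   Value gid = spirv::getBuiltinVariableValue(
//       op, spirv::BuiltIn::GlobalInvocationId, rewriter);
//
// which materializes the global variable if needed, takes its address, and
// loads a vector<3xi32> value from it.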

//===----------------------------------------------------------------------===//
// Index calculation
//===----------------------------------------------------------------------===//

spirv::AccessChainOp mlir::spirv::getElementPtr(
    SPIRVTypeConverter &typeConverter, MemRefType baseType, Value basePtr,
    ValueRange indices, Location loc, OpBuilder &builder) {
  // Get the strides and offset of the MemRefType and verify they are static.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  if (failed(getStridesAndOffset(baseType, strides, offset)) ||
      llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()) ||
      offset == MemRefType::getDynamicStrideOrOffset()) {
    return nullptr;
  }

  auto indexType = typeConverter.getIndexType(builder.getContext());

  SmallVector<Value, 2> linearizedIndices;
  // Add a '0' at the start to index into the struct.
  auto zero = spirv::ConstantOp::getZero(indexType, loc, builder);
  linearizedIndices.push_back(zero);

  if (baseType.getRank() == 0) {
    linearizedIndices.push_back(zero);
  } else {
    // TODO: Instead of this logic, use affine.apply and add patterns for
    // lowering affine.apply to standard ops. These will get lowered to SPIR-V
    // ops by the DialectConversion framework.
    Value ptrLoc = builder.create<spirv::ConstantOp>(
        loc, indexType, IntegerAttr::get(indexType, offset));
    assert(indices.size() == strides.size() &&
           "must provide indices for all dimensions");
    for (auto index : llvm::enumerate(indices)) {
      Value strideVal = builder.create<spirv::ConstantOp>(
          loc, indexType, IntegerAttr::get(indexType, strides[index.index()]));
      Value update =
          builder.create<spirv::IMulOp>(loc, strideVal, index.value());
      ptrLoc = builder.create<spirv::IAddOp>(loc, ptrLoc, update);
    }
    linearizedIndices.push_back(ptrLoc);
  }
  return builder.create<spirv::AccessChainOp>(loc, basePtr, linearizedIndices);
}
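
// A worked example of the access-chain construction above (a sketch, assuming
// the default identity layout): for `memref<4x8xf32>` with indices (i, j), the
// strides are [8, 1] and the offset is 0, so the linearized index is
// 0 + i * 8 + j * 1, and the resulting spv.AccessChain uses indices
// [0, i * 8 + j], where the leading 0 steps into the wrapping struct.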

//===----------------------------------------------------------------------===//
// Set ABI attributes for lowering entry functions.
//===----------------------------------------------------------------------===//

LogicalResult
mlir::spirv::setABIAttrs(spirv::FuncOp funcOp,
                         spirv::EntryPointABIAttr entryPointInfo,
                         ArrayRef<spirv::InterfaceVarABIAttr> argABIInfo) {
  // Set the attributes for the arguments and the function.
  StringRef argABIAttrName = spirv::getInterfaceVarABIAttrName();
  for (auto argIndex : llvm::seq<unsigned>(0, argABIInfo.size())) {
    funcOp.setArgAttr(argIndex, argABIAttrName, argABIInfo[argIndex]);
  }
  funcOp->setAttr(spirv::getEntryPointABIAttrName(), entryPointInfo);
  return success();
}

//===----------------------------------------------------------------------===//
// SPIR-V ConversionTarget
//===----------------------------------------------------------------------===//

std::unique_ptr<spirv::SPIRVConversionTarget>
spirv::SPIRVConversionTarget::get(spirv::TargetEnvAttr targetAttr) {
  std::unique_ptr<SPIRVConversionTarget> target(
      // std::make_unique does not work here because the constructor is private.
      new SPIRVConversionTarget(targetAttr));
  SPIRVConversionTarget *targetPtr = target.get();
  target->addDynamicallyLegalDialect<SPIRVDialect>(
      // We need to capture the raw pointer here because it is stable: the
      // local `target` variable will be moved away when this function returns.
      [targetPtr](Operation *op) { return targetPtr->isLegalOp(op); });
  return target;
}

spirv::SPIRVConversionTarget::SPIRVConversionTarget(
    spirv::TargetEnvAttr targetAttr)
    : ConversionTarget(*targetAttr.getContext()), targetEnv(targetAttr) {}

bool spirv::SPIRVConversionTarget::isLegalOp(Operation *op) {
  // Make sure this op is available at the given version. Ops not implementing
  // QueryMinVersionInterface/QueryMaxVersionInterface are available to all
  // SPIR-V versions.
  if (auto minVersion = dyn_cast<spirv::QueryMinVersionInterface>(op))
    if (minVersion.getMinVersion() > this->targetEnv.getVersion()) {
      LLVM_DEBUG(llvm::dbgs()
                 << op->getName() << " illegal: requiring min version "
                 << spirv::stringifyVersion(minVersion.getMinVersion())
                 << "\n");
      return false;
    }
  if (auto maxVersion = dyn_cast<spirv::QueryMaxVersionInterface>(op))
    if (maxVersion.getMaxVersion() < this->targetEnv.getVersion()) {
      LLVM_DEBUG(llvm::dbgs()
                 << op->getName() << " illegal: requiring max version "
                 << spirv::stringifyVersion(maxVersion.getMaxVersion())
                 << "\n");
      return false;
    }

  // Make sure this op's required extensions are allowed to be used. Ops not
  // implementing QueryExtensionInterface do not require extensions to be
  // available.
  if (auto extensions = dyn_cast<spirv::QueryExtensionInterface>(op))
    if (failed(checkExtensionRequirements(op->getName(), this->targetEnv,
                                          extensions.getExtensions())))
      return false;

  // Make sure this op's required capabilities are allowed to be used. Ops not
  // implementing QueryCapabilityInterface do not require capabilities to be
  // available.
  if (auto capabilities = dyn_cast<spirv::QueryCapabilityInterface>(op))
    if (failed(checkCapabilityRequirements(op->getName(), this->targetEnv,
                                           capabilities.getCapabilities())))
      return false;

  SmallVector<Type, 4> valueTypes;
  valueTypes.append(op->operand_type_begin(), op->operand_type_end());
  valueTypes.append(op->result_type_begin(), op->result_type_end());

  // Special treatment for global variables, whose type requirements are
  // conveyed by type attributes.
  if (auto globalVar = dyn_cast<spirv::GlobalVariableOp>(op))
    valueTypes.push_back(globalVar.type());

  // Make sure the op's operands/results use types that are allowed by the
  // target environment.
  SmallVector<ArrayRef<spirv::Extension>, 4> typeExtensions;
  SmallVector<ArrayRef<spirv::Capability>, 8> typeCapabilities;
  for (Type valueType : valueTypes) {
    typeExtensions.clear();
    valueType.cast<spirv::SPIRVType>().getExtensions(typeExtensions);
    if (failed(checkExtensionRequirements(op->getName(), this->targetEnv,
                                          typeExtensions)))
      return false;

    typeCapabilities.clear();
    valueType.cast<spirv::SPIRVType>().getCapabilities(typeCapabilities);
    if (failed(checkCapabilityRequirements(op->getName(), this->targetEnv,
                                           typeCapabilities)))
      return false;
  }

  return true;
}