//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/Debug.h"

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

/// Attribute name used to mark the bufferization layout for region
/// arguments during linalg comprehensive bufferization.
constexpr const ::llvm::StringLiteral
    bufferization::BufferizableOpInterface::kBufferLayoutAttrName;

/// Attribute name used to mark region arguments that can be bufferized
/// in-place during linalg comprehensive bufferization.
constexpr const ::llvm::StringLiteral
    bufferization::BufferizableOpInterface::kInplaceableAttrName;

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions() = default;

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (isOpAllowed(op))
    return dyn_cast<BufferizableOpInterface>(op);
  return nullptr;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  if (auto bufferizableOp = value.getDefiningOp<BufferizableOpInterface>())
    if (isOpAllowed(bufferizableOp.getOperation()))
      return bufferizableOp;
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

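/// Set the insertion point of `b` right after the definition of `value`: after
/// the defining op, or at the beginning of the block if `value` is a
/// BlockArgument.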
static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperands alias with `result` if the op is bufferized in
/// place. Return an empty vector if the op is not bufferizable.
SmallVector<OpOperand *>
BufferizationState::getAliasingOpOperand(OpResult result) const {
  if (Operation *op = result.getDefiningOp())
    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
      return bufferizableOp.getAliasingOpOperand(result, *this);
  return {};
}

/// Determine which OpResults alias with `opOperand` if the op is bufferized in
/// place. Return an empty vector if the op is not bufferizable.
SmallVector<OpResult>
BufferizationState::getAliasingOpResult(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.getAliasingOpResult(opOperand, *this);
  return {};
}

/// Return true if `opOperand` bufferizes to a memory read. Return `true` if
/// the op is not bufferizable.
bool BufferizationState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return true if `opOperand` bufferizes to a memory write. Return `true` if
/// the op is not bufferizable.
bool BufferizationState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return true if `opOperand` does neither read nor write but bufferizes to an
/// alias. Return false if the op is not bufferizable.
bool BufferizationState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return false.
  return false;
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
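///
/// Example (illustrative): If %t is aliased by a tensor.extract_slice op
/// (which by itself neither reads nor writes) and the resulting slice is in
/// turn used by an op that bufferizes to a memory read, `isValueRead(%t)`
/// returns true.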
bool BufferizationState::isValueRead(Value value) const {
  assert(value.getType().isa<TensorType>() && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (OpResult opResult : getAliasingOpResult(*uMaybeReading))
        for (OpOperand &use : opResult.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}

// Starting from `value`, follow the use-def chain in reverse, always selecting
// the aliasing OpOperands. Find and return Values for which `condition`
// evaluates to true. OpOperands of such matching Values are not traversed any
// further.
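//
// Example (illustrative): With condition = "bufferizes to a memory write" and
// a chain %2 = op2(%1), %1 = op1(%0), where only op1 writes, the traversal
// starting from %2 stops at %1 and returns {%1}. Traversal also stops at
// BlockArguments, at values without aliasing OpOperands, and at values defined
// by ops that are not allowed by the bufferization options.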
llvm::SetVector<Value> BufferizationState::findValueInReverseUseDefChain(
    Value value, llvm::function_ref<bool(Value)> condition) const {
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(value);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();
    if (condition(value) || value.isa<BlockArgument>()) {
      result.insert(value);
      continue;
    }

    OpResult opResult = value.cast<OpResult>();
    SmallVector<OpOperand *> opOperands = getAliasingOpOperand(opResult);
    if (opOperands.empty() || !options.isOpAllowed(value.getDefiningOp())) {
      result.insert(value);
      continue;
    }

    for (OpOperand *o : opOperands)
      workingSet.insert(o->get());
  }

  return result;
}

// Find the Values of the last preceding write of a given Value.
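// Note: If the reverse use-def chain ends at a value that is not defined by a
// (bufferizable and allowed) op, e.g., a BlockArgument, that value is returned
// regardless of whether it is a memory write.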
llvm::SetVector<Value>
BufferizationState::findLastPrecedingWrite(Value value) const {
  return findValueInReverseUseDefChain(value, [&](Value value) {
    Operation *op = value.getDefiningOp();
    if (!op)
      return true;
    auto bufferizableOp = options.dynCastBufferizableOp(op);
    if (!bufferizableOp)
      return true;
    return bufferizableOp.isMemoryWrite(value.cast<OpResult>(), *this);
  });
}

BufferizationState::BufferizationState(const BufferizationOptions &options)
    : options(options) {}

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
  assert((!rankedTensorType || memrefType.cast<MemRefType>().getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

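/// Return the buffer (memref) that is associated with the given tensor value.
/// If the value is the result of a bufferization.to_tensor op, return that
/// op's memref operand directly. Otherwise, materialize a
/// bufferization.to_memref op right after the definition of the tensor.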
static Value lookupBuffer(RewriterBase &rewriter, Value tensor,
                          const BufferizationOptions &options) {
  auto tensorType = tensor.getType().dyn_cast<TensorType>();
  assert(tensorType && "unexpected non-tensor type");

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = tensor.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.memref();

  // Insert to_memref op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, tensor);
  Type memrefType = getMemRefType(tensorType, options);
  ensureToMemrefOpIsValid(tensor, memrefType);
  return rewriter.create<bufferization::ToMemrefOp>(tensor.getLoc(), memrefType,
                                                    tensor);
}
/// Return the buffer (memref) for the given OpOperand (tensor). Allocate a new
/// buffer and copy over data from the existing buffer if out-of-place
/// bufferization is necessary.
FailureOr<Value> BufferizationState::getBuffer(
    RewriterBase &rewriter, OpOperand &opOperand, bool forceInPlace,
    Optional<Operation *> customCopyInsertionPoint) const {
  OpBuilder::InsertionGuard guard(rewriter);
  Operation *op = opOperand.getOwner();
  Location loc = op->getLoc();
  Value operand = opOperand.get();
  Value operandBuffer = lookupBuffer(rewriter, operand, options);

  if (forceInPlace || isInPlace(opOperand))
    return operandBuffer;

  // Bufferizing out-of-place: Allocate a new buffer.
  // Move insertion point right after `operandBuffer`. That is where the
  // allocation should be inserted (in the absence of allocation hoisting).
  setInsertionPointAfter(rewriter, operandBuffer);
  // Allocate the result buffer.
  FailureOr<Value> resultBuffer = createAlloc(rewriter, loc, operandBuffer,
                                              options.createDeallocs, options);
  if (failed(resultBuffer))
    return failure();
  // Do not copy if the last preceding writes of `operand` are ops that do
  // not write (skipping ops that merely create aliases). E.g., InitTensorOp.
  // Note: If `findLastPrecedingWrite` reaches the end of the reverse SSA
  // use-def chain, it returns that value, regardless of whether it is a
  // memory write or not.
  SetVector<Value> lastWrites = findLastPrecedingWrite(operand);
  if (llvm::none_of(lastWrites, [&](Value lastWrite) {
        if (auto bufferizableOp = options.dynCastBufferizableOp(lastWrite))
          return bufferizableOp.isMemoryWrite(lastWrite.cast<OpResult>(),
                                              *this);
        return true;
      }))
    return resultBuffer;
  // Do not copy if the copied data is never read.
  SmallVector<OpResult> aliasingOpResults = getAliasingOpResult(opOperand);
  if (!aliasingOpResults.empty() && !bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliasingOpResults,
                    [&](OpResult opResult) { return isValueRead(opResult); }))
    return resultBuffer;
  // Do not copy if this op does not read the data, but writes it.
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return resultBuffer;

  if (customCopyInsertionPoint) {
    rewriter.setInsertionPoint(*customCopyInsertionPoint);
  } else {
    // The copy happens right before the op that is bufferized.
    rewriter.setInsertionPoint(op);
  }
  if (failed(
          createMemCpy(rewriter, loc, operandBuffer, *resultBuffer, options)))
    return failure();

  return resultBuffer;
}

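/// Replace an op with the given bufferized values. For every tensor OpResult,
/// a bufferization.to_tensor op is inserted around the corresponding memref
/// replacement, so that existing uses (which still expect a tensor) remain
/// valid until they are bufferized as well.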
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (opResult.getType().isa<TensorType>()) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert((replacement.getType().isa<MemRefType>() ||
              replacement.getType().isa<UnrankedMemRefType>()) &&
             "tensor op result should be replaced with a memref value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}

AlwaysCopyBufferizationState::AlwaysCopyBufferizationState(
    const BufferizationOptions &options)
    : BufferizationState(options) {}

/// Return `true` if the given OpOperand has been decided to bufferize
/// in-place.
bool AlwaysCopyBufferizationState::isInPlace(OpOperand &opOperand) const {
  // OpOperands that bufferize to a memory write are out-of-place, i.e., an
  // alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

/// Return true if `v1` and `v2` bufferize to equivalent buffers.
bool AlwaysCopyBufferizationState::areEquivalentBufferizedValues(
    Value v1, Value v2) const {
  // There is no analysis, so we do not know if the values are equivalent. The
  // conservative answer is "false".
  return false;
}

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc/dealloc insertion support.
//===----------------------------------------------------------------------===//

/// Move the insertion point of the given builder to the beginning of a
/// surrounding block as much as possible, while not crossing any allocation
/// hoisting barriers.
static void moveInsertionPointToAllocationHoistingBarrier(OpBuilder &b) {
  Operation *op = b.getInsertionBlock()->getParentOp();
  while (op) {
    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
      if (bufferizableOp.isAllocationHoistingBarrier())
        break;
    op = op->getParentOp();
  }

  if (!op) {
    // No allocation hoisting barrier found. Hoist to FuncOp.
    op = b.getInsertionBlock()->getParentOp();
    if (!isa<FuncOp>(op))
      op = op->getParentOfType<FuncOp>();
    assert(op && "could not find enclosing FuncOp");
  }

  // TODO: Handle cases where the allocation hoisting barrier has more than one
  // region or block.
  assert(op->getNumRegions() == 1 &&
         "allocation hoisting barriers with >1 regions not supported");
  assert(op->getRegion(0).getBlocks().size() == 1 &&
         "allocation hoisting barriers with >1 blocks not supported");
  b.setInsertionPointToStart(&(op->getRegion(0).front()));
}

/// Compute the type of the memref to use for allocating the buffer for
/// `shapedValue`. Also return (by reference in `dynShape`) the values for the
/// dynamic dimensions in the returned memref type. The function may also set
/// the insertion point to an earlier location, where the allocation should
/// happen ("allocation hoisting").
static MemRefType getAllocationTypeAndShape(OpBuilder &b, Location loc,
                                            Value shapedValue,
                                            SmallVectorImpl<Value> &dynShape) {
  MemRefType allocMemRefType =
      getContiguousMemRefType(shapedValue.getType().cast<ShapedType>());

  // Compute the dynamic part of the shape.
  bool reifiedShapes = false;
  if (auto rankedOp = dyn_cast_or_null<ReifyRankedShapedTypeOpInterface>(
          shapedValue.getDefiningOp())) {
    ReifiedRankedShapedTypeDims resultDims;
    if (succeeded(rankedOp.reifyResultShapes(b, resultDims))) {
      reifiedShapes = true;
      OpResult resultValue = shapedValue.dyn_cast<OpResult>();
      auto &shape = resultDims[resultValue.getResultNumber()];
      for (const auto &dim : enumerate(allocMemRefType.getShape()))
        if (ShapedType::isDynamic(dim.value()))
          dynShape.push_back(shape[dim.index()]);
    }
  }

  if (!reifiedShapes) {
    for (const auto &dim : enumerate(allocMemRefType.getShape()))
      if (ShapedType::isDynamic(dim.value())) {
        assert((shapedValue.getType().isa<UnrankedMemRefType>() ||
                shapedValue.getType().isa<MemRefType>()) &&
               "expected MemRef type");
        dynShape.push_back(
            b.create<memref::DimOp>(loc, shapedValue, dim.index()));
      }
  }

  // If the buffer is statically shaped, try to hoist it to the first enclosing
  // parallel region.
  // TODO: Also hoist in the dynamic case. For now, this relies on subsequent
  // calls to LICM and buffer hoisting, which will most likely not succeed.
  // TODO: When packing, allocate a static bounding box, which will enable more
  // hoisting.
  if (dynShape.empty())
    moveInsertionPointToAllocationHoistingBarrier(b);

  return allocMemRefType;
}

/// Create an AllocOp/DeallocOp pair, where the AllocOp is after
/// `shapedValue.getDefiningOp` (or at the top of the block in case of a
/// bbArg) and, if `deallocMemref` is set, the DeallocOp is at the end of the
/// block.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, Value shapedValue,
                           bool deallocMemref,
                           const BufferizationOptions &options) {
  // Take a guard before anything else.
  OpBuilder::InsertionGuard g(b);

  // 1. Create memory allocation.
  assert(shapedValue.getType().isa<ShapedType>() && "expected ShapedType");
  MemRefType memRefType = shapedValue.getType().dyn_cast<MemRefType>();
  SmallVector<Value> dynShape;
  // Note: getAllocationTypeAndShape also sets the insertion point.
  MemRefType allocMemRefType =
      getAllocationTypeAndShape(b, loc, shapedValue, dynShape);
  FailureOr<Value> allocated =
      createAlloc(b, loc, allocMemRefType, dynShape, options);
  if (failed(allocated))
    return failure();
  Value casted = allocated.getValue();
  if (memRefType && memRefType != allocMemRefType) {
    assert(memref::CastOp::areCastCompatible(allocated.getValue().getType(),
                                             memRefType) &&
           "createAlloc: cast incompatible");
    casted = b.create<memref::CastOp>(loc, memRefType, allocated.getValue());
  }

  if (deallocMemref) {
    // 2. Create memory deallocation.
    b.setInsertionPoint(allocated.getValue().getParentBlock()->getTerminator());
    if (failed(createDealloc(b, loc, allocated.getValue(), options)))
      return failure();
  }

  return casted;
}

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, MemRefType type,
                           ValueRange dynShape,
                           const BufferizationOptions &options) {
  if (options.allocationFn)
    return (*options.allocationFn)(b, loc, type, dynShape,
                                   options.bufferAlignment);

  // Default buffer allocation via AllocOp.
  Value allocated = b.create<memref::AllocOp>(
      loc, type, dynShape, b.getI64IntegerAttr(options.bufferAlignment));
  return allocated;
}

/// Create a memref allocation with the given type and dynamic extents. If
/// `deallocMemref` is set, also deallocate the memref at the end of the block.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, MemRefType type,
                           ValueRange dynShape, bool deallocMemref,
                           const BufferizationOptions &options) {
  OpBuilder::InsertionGuard g(b);

  FailureOr<Value> alloc = createAlloc(b, loc, type, dynShape, options);
  if (failed(alloc))
    return failure();

  if (deallocMemref) {
    // Dealloc at the end of the block.
    b.setInsertionPoint(alloc.getValue().getParentBlock()->getTerminator());
    if (failed(createDealloc(b, loc, *alloc, options)))
      return failure();
  }

  return alloc;
}

/// Create a memref deallocation.
LogicalResult
bufferization::createDealloc(OpBuilder &b, Location loc, Value allocatedBuffer,
                             const BufferizationOptions &options) {
  if (options.deallocationFn)
    return (*options.deallocationFn)(b, loc, allocatedBuffer);

  // Default buffer deallocation via DeallocOp.
  b.create<memref::DeallocOp>(loc, allocatedBuffer);
  return success();
}

/// Create a memory copy between two memref buffers.
LogicalResult bufferization::createMemCpy(OpBuilder &b, Location loc,
                                          Value from, Value to,
                                          const BufferizationOptions &options) {
  if (options.memCpyFn)
    return (*options.memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific helpers.
//===----------------------------------------------------------------------===//

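/// Return true if the given value is an argument of a block that is directly
/// nested inside a FuncOp (e.g., a function argument).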
bool bufferization::isFunctionArgument(Value value) {
  auto bbArg = value.dyn_cast<BlockArgument>();
  if (!bbArg)
    return false;
  return isa<FuncOp>(bbArg.getOwner()->getParentOp());
}

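/// Return a contiguous MemRefType (i.e., with no layout map) with the same
/// shape and element type as the given shaped type.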
MemRefType bufferization::getContiguousMemRefType(ShapedType shapedType,
                                                  Attribute memorySpace) {
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(shapedType.getShape(), shapedType.getElementType(),
                         layout, memorySpace);
}

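/// Return the memref type into which the given tensor type can be bufferized.
/// Unranked tensors map to unranked memrefs. Ranked tensors map to ranked
/// memrefs with either the given layout or a fully dynamic layout map,
/// depending on `options.fullyDynamicLayoutMaps`; see the cases below.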
BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with a specified layout. If fully dynamic
  // layout maps are not requested, generate a type with `layout`, which is
  // empty (no layout map) by default.
  auto rankedTensorType = tensorType.cast<RankedTensorType>();
  if (layout || !options.fullyDynamicLayoutMaps) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  // Case 3: Ranked memref type with an unspecified layout. Choose the most
  // dynamic one.
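  // E.g., a tensor<4x?xf32> would map to something like
  //   memref<4x?xf32,
  //          affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>>
  // (fully dynamic offset and strides; shown for illustration).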
  // TODO: Address space decisions to connect with the actual alloc.
  int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamicStrideOrOffset);
  AffineMap stridedLayout = makeStridedLinearLayoutMap(
      dynamicStrides, dynamicOffset, rankedTensorType.getContext());
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}