//===- BufferizableOpInterface.cpp - Bufferizable Ops --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "llvm/Support/Debug.h"

namespace mlir {
namespace bufferization {

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferizable-op-interface"
#define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
#define LDBG(X) LLVM_DEBUG(DBGS() << (X))

using namespace mlir;
using namespace bufferization;

/// Attribute name used to mark the bufferization layout for region
/// arguments during linalg comprehensive bufferization.
constexpr const ::llvm::StringLiteral
    bufferization::BufferizableOpInterface::kBufferLayoutAttrName;

/// Attribute name used to mark region arguments that can be bufferized
/// in-place during linalg comprehensive bufferization.
constexpr const ::llvm::StringLiteral
    bufferization::BufferizableOpInterface::kInplaceableAttrName;

//===----------------------------------------------------------------------===//
// BufferizationOptions
//===----------------------------------------------------------------------===//

// Default constructor for BufferizationOptions.
BufferizationOptions::BufferizationOptions() = default;

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (isOpAllowed(op))
    return dyn_cast<BufferizableOpInterface>(op);
  return nullptr;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  if (auto bufferizableOp = value.getDefiningOp<BufferizableOpInterface>())
    if (isOpAllowed(bufferizableOp.getOperation()))
      return bufferizableOp;
  return nullptr;
}

void BufferizationOptions::addDialectStateInitializer(
    StringRef name, const DialectStateInitFn &fn) {
  stateInitializers.push_back(
      [=](BufferizationState &state) { state.insertDialectState(name, fn()); });
}

//===----------------------------------------------------------------------===//
// Helper functions for BufferizableOpInterface
//===----------------------------------------------------------------------===//

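/// Set the insertion point of the given builder right after the definition of
/// `value`: at the beginning of the block for a BlockArgument, or right after
/// the defining op otherwise.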
static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = value.dyn_cast<BlockArgument>()) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}

/// Determine which OpOperands alias with `result` if the op is bufferized in
/// place. Return an empty vector if the op is not bufferizable.
SmallVector<OpOperand *>
BufferizationState::getAliasingOpOperand(OpResult result) const {
  if (Operation *op = result.getDefiningOp())
    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
      return bufferizableOp.getAliasingOpOperand(result, *this);
  return {};
}

/// Determine which OpResults alias with `opOperand` if the op is bufferized in
/// place. Return an empty vector if the op is not bufferizable.
SmallVector<OpResult>
BufferizationState::getAliasingOpResult(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.getAliasingOpResult(opOperand, *this);
  return {};
}

/// Return `true` if `opOperand` bufferizes to a memory read. Also return
/// `true` if the op is not bufferizable (conservative answer).
bool BufferizationState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return `true` if `opOperand` bufferizes to a memory write. Also return
/// `true` if the op is not bufferizable (conservative answer).
bool BufferizationState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return true.
  return true;
}

/// Return `true` if `opOperand` neither reads nor writes, but bufferizes to an
/// alias. Return `false` if the op is not bufferizable.
bool BufferizationState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          dyn_cast<BufferizableOpInterface>(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor. The inplace analysis does not support it.
  // Conservatively return false.
  return false;
}

/// Return true if the given value is read by an op that bufferizes to a memory
/// read. Also takes into account ops that create an alias but do not read by
/// themselves (e.g., ExtractSliceOp).
bool BufferizationState::isValueRead(Value value) const {
  assert(value.getType().isa<TensorType>() && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

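  // Worklist-based traversal: follow the uses of aliases created by ops that
  // neither read nor write, until a use that reads from memory is found.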
  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    // Skip over all ops that neither read nor write (but create an alias).
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (OpResult opResult : getAliasingOpResult(*uMaybeReading))
        for (OpOperand &use : opResult.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}

// Starting from `value`, follow the use-def chain in reverse, always selecting
// the aliasing OpOperands. Find and return Values for which `condition`
// evaluates to true. OpOperands of such matching Values are not traversed any
// further.
llvm::SetVector<Value> BufferizationState::findValueInReverseUseDefChain(
    Value value, llvm::function_ref<bool(Value)> condition) const {
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(value);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();
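    // Stop the traversal at values that match `condition` and at block
    // arguments, which have no defining op to look through.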
    if (condition(value) || value.isa<BlockArgument>()) {
      result.insert(value);
      continue;
    }

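    // An op without aliasing OpOperands (or an op that is not allowed by the
    // bufferization options) terminates the traversal; the value itself is
    // part of the result.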
    OpResult opResult = value.cast<OpResult>();
    SmallVector<OpOperand *> opOperands = getAliasingOpOperand(opResult);
    if (opOperands.empty() || !options.isOpAllowed(value.getDefiningOp())) {
      result.insert(value);
      continue;
    }

    for (OpOperand *o : opOperands)
      workingSet.insert(o->get());
  }

  return result;
}

// Find the Values that constitute the last preceding write(s) of the given
// Value.
llvm::SetVector<Value>
BufferizationState::findLastPrecedingWrite(Value value) const {
  return findValueInReverseUseDefChain(value, [&](Value value) {
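    // Values without a defining op and values defined by ops that are not
    // bufferizable (or not allowed) are conservatively treated as writes.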
    Operation *op = value.getDefiningOp();
    if (!op)
      return true;
    auto bufferizableOp = options.dynCastBufferizableOp(op);
    if (!bufferizableOp)
      return true;
    return bufferizableOp.isMemoryWrite(value.cast<OpResult>(), *this);
  });
}

BufferizationState::BufferizationState(const BufferizationOptions &options)
    : options(options) {
  for (const BufferizationOptions::BufferizationStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}

// bufferization.to_memref is not allowed to change the rank.
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType) {
#ifndef NDEBUG
  auto rankedTensorType = tensor.getType().dyn_cast<RankedTensorType>();
  assert((!rankedTensorType || memrefType.cast<MemRefType>().getRank() ==
                                   rankedTensorType.getRank()) &&
         "to_memref would be invalid: mismatching ranks");
#endif
}

Value mlir::bufferization::lookupBuffer(RewriterBase &rewriter, Value tensor,
                                        const BufferizationOptions &options) {
  auto tensorType = tensor.getType().dyn_cast<TensorType>();
  assert(tensorType && "unexpected non-tensor type");

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = tensor.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.memref();

  // Insert to_memref op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, tensor);
  Type memrefType = getMemRefType(tensorType, options);
  ensureToMemrefOpIsValid(tensor, memrefType);
  return rewriter.create<bufferization::ToMemrefOp>(tensor.getLoc(), memrefType,
                                                    tensor);
}

/// Return the buffer (memref) for the given OpOperand (tensor). Allocate a
/// new buffer and copy over data from the existing buffer if out-of-place
/// bufferization is necessary.
FailureOr<Value> BufferizationState::getBuffer(
    RewriterBase &rewriter, OpOperand &opOperand, bool forceInPlace,
    Optional<Operation *> customCopyInsertionPoint) const {
  OpBuilder::InsertionGuard guard(rewriter);
  Operation *op = opOperand.getOwner();
  Location loc = op->getLoc();
  Value operand = opOperand.get();
  Value operandBuffer = lookupBuffer(rewriter, operand, options);

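  // If the operand bufferizes in-place, the existing buffer can be reused
  // directly and no copy is needed.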
  if (forceInPlace || isInPlace(opOperand))
    return operandBuffer;

  // Bufferizing out-of-place: Allocate a new buffer.
  // Move insertion point right after `operandBuffer`. That is where the
  // allocation should be inserted (in the absence of allocation hoisting).
  setInsertionPointAfter(rewriter, operandBuffer);
  // Allocate the result buffer.
  FailureOr<Value> resultBuffer = createAlloc(rewriter, loc, operandBuffer,
                                              options.createDeallocs, options);
  if (failed(resultBuffer))
    return failure();
  // Do not copy if the last preceding writes of `operand` are ops that do
  // not write (skipping ops that merely create aliases). E.g., InitTensorOp.
  // Note: If `findLastPrecedingWrite` reaches the end of the reverse SSA
  // use-def chain, it returns that value, regardless of whether it is a
  // memory write or not.
  SetVector<Value> lastWrites = findLastPrecedingWrite(operand);
  if (llvm::none_of(lastWrites, [&](Value lastWrite) {
        if (auto bufferizableOp = options.dynCastBufferizableOp(lastWrite))
          return bufferizableOp.isMemoryWrite(lastWrite.cast<OpResult>(),
                                              *this);
        return true;
      }))
    return resultBuffer;
  // Do not copy if the copied data is never read.
  SmallVector<OpResult> aliasingOpResults = getAliasingOpResult(opOperand);
  if (!aliasingOpResults.empty() && !bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliasingOpResults,
                    [&](OpResult opResult) { return isValueRead(opResult); }))
    return resultBuffer;
  // Do not copy if this op does not read the data, but writes it.
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return resultBuffer;

  if (customCopyInsertionPoint) {
    rewriter.setInsertionPoint(*customCopyInsertionPoint);
  } else {
    // The copy happens right before the op that is bufferized.
    rewriter.setInsertionPoint(op);
  }
  if (failed(
          createMemCpy(rewriter, loc, operandBuffer, *resultBuffer, options)))
    return failure();

  return resultBuffer;
}

void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  OpBuilder::InsertionGuard g(rewriter);

  // Replace all OpResults with the given values.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (opResult.getType().isa<TensorType>()) {
      // The OpResult is a tensor. Such values are replaced with memrefs during
      // bufferization.
      assert((replacement.getType().isa<MemRefType>() ||
              replacement.getType().isa<UnrankedMemRefType>()) &&
             "tensor op result should be replaced with a memref value");
      // The existing uses of the OpResult still expect a tensor. Insert a
      // ToTensorOp. Throughout bufferization, this ToTensorOp will gradually
      // lose all of its users and eventually DCE away.
      rewriter.setInsertionPointAfter(op);
      replacement = rewriter.create<bufferization::ToTensorOp>(
          replacement.getLoc(), replacement);
    }
    replacements.push_back(replacement);
  }

  rewriter.replaceOp(op, replacements);
}

AlwaysCopyBufferizationState::AlwaysCopyBufferizationState(
    const BufferizationOptions &options)
    : BufferizationState(options) {}

/// Return `true` if the given OpOperand has been decided to bufferize
/// in-place.
bool AlwaysCopyBufferizationState::isInPlace(OpOperand &opOperand) const {
  // OpOperands that bufferize to a memory write are out-of-place, i.e., an
  // alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}

/// Return true if `v1` and `v2` bufferize to equivalent buffers.
bool AlwaysCopyBufferizationState::areEquivalentBufferizedValues(
    Value v1, Value v2) const {
  // There is no analysis, so we do not know if the values are equivalent. The
  // conservative answer is "false".
  return false;
}

//===----------------------------------------------------------------------===//
// Bufferization-specific scoped alloc/dealloc insertion support.
//===----------------------------------------------------------------------===//

/// Move the insertion point of the given builder as far up as possible: to the
/// beginning of the first block of the closest enclosing allocation hoisting
/// barrier (or of the enclosing FuncOp if there is no such barrier).
static void moveInsertionPointToAllocationHoistingBarrier(OpBuilder &b) {
  Operation *op = b.getInsertionBlock()->getParentOp();
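  // Walk up the parent ops until an op that is an allocation hoisting barrier
  // is found.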
  while (op) {
    if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
      if (bufferizableOp.isAllocationHoistingBarrier())
        break;
    op = op->getParentOp();
  }

  if (!op) {
    // No allocation hoisting barrier found. Hoist to FuncOp.
    op = b.getInsertionBlock()->getParentOp();
    if (!isa<FuncOp>(op))
      op = op->getParentOfType<FuncOp>();
    assert(op && "could not find enclosing FuncOp");
  }

  // TODO: Handle cases where allocation hoisting barrier has more than one
  // region or block.
  assert(op->getNumRegions() == 1 &&
         "allocation hoisting barriers with >1 regions not supported");
  assert(op->getRegion(0).getBlocks().size() == 1 &&
         "allocation hoisting barriers with >1 blocks not supported");
  b.setInsertionPointToStart(&(op->getRegion(0).front()));
}

/// Compute the type of the `memref` to use for allocating the buffer for
/// `shapedValue`. Also return (by reference in `dynShape`) the values for the
/// dynamic dimensions in the returned `memref` type. The function may also set
/// the insertion point to an earlier location, where the allocation should
/// happen ("allocation hoisting").
static MemRefType getAllocationTypeAndShape(OpBuilder &b, Location loc,
                                            Value shapedValue,
                                            SmallVectorImpl<Value> &dynShape) {
  MemRefType allocMemRefType =
      getContiguousMemRefType(shapedValue.getType().cast<ShapedType>());

  // Compute the dynamic part of the shape.
  bool reifiedShapes = false;
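  // If possible, reify the result shapes of the defining op to get the dynamic
  // extents; otherwise, fall back to creating memref.dim ops below.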
  if (auto rankedOp = dyn_cast_or_null<ReifyRankedShapedTypeOpInterface>(
          shapedValue.getDefiningOp())) {
    ReifiedRankedShapedTypeDims resultDims;
    if (succeeded(rankedOp.reifyResultShapes(b, resultDims))) {
      reifiedShapes = true;
      OpResult resultValue = shapedValue.dyn_cast<OpResult>();
      auto &shape = resultDims[resultValue.getResultNumber()];
      for (const auto &dim : enumerate(allocMemRefType.getShape()))
        if (ShapedType::isDynamic(dim.value()))
          dynShape.push_back(shape[dim.index()]);
    }
  }

  if (!reifiedShapes) {
    for (const auto &dim : enumerate(allocMemRefType.getShape()))
      if (ShapedType::isDynamic(dim.value())) {
        assert((shapedValue.getType().isa<UnrankedMemRefType>() ||
                shapedValue.getType().isa<MemRefType>()) &&
               "expected MemRef type");
        dynShape.push_back(
            b.create<memref::DimOp>(loc, shapedValue, dim.index()));
      }
  }

  // If the buffer is statically shaped, try to hoist it to the first enclosing
  // parallel region.
  // TODO: also hoist in the dynamic case. For now this relies on subsequent
  // calls to LICM and buffer hoisting which will most likely not succeed.
  // TODO: when packing, allocate a static bounding box which will enable more
  // hoisting.
  if (dynShape.empty())
    moveInsertionPointToAllocationHoistingBarrier(b);

  return allocMemRefType;
}

/// Create an AllocOp for `shapedValue` and, if `deallocMemref` is set, a
/// matching DeallocOp at the end of the block. The allocation may be hoisted
/// to an enclosing allocation hoisting barrier if it is statically shaped.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, Value shapedValue,
                           bool deallocMemref,
                           const BufferizationOptions &options) {
  // Take a guard before anything else.
  OpBuilder::InsertionGuard g(b);

  // 1. Create memory allocation.
  assert(shapedValue.getType().isa<ShapedType>());
  MemRefType memRefType = shapedValue.getType().dyn_cast<MemRefType>();
  SmallVector<Value> dynShape;
  // Note: getAllocationTypeAndShape also sets the insertion point.
  MemRefType allocMemRefType =
      getAllocationTypeAndShape(b, loc, shapedValue, dynShape);
  FailureOr<Value> allocated =
      createAlloc(b, loc, allocMemRefType, dynShape, options);
  if (failed(allocated))
    return failure();
  Value casted = allocated.getValue();
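  // If the memref type of `shapedValue` differs from the allocated
  // (contiguous) type, insert a cast so that the returned value has the
  // original type.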
  if (memRefType && memRefType != allocMemRefType) {
    assert(memref::CastOp::areCastCompatible(allocated.getValue().getType(),
                                             memRefType) &&
           "createAlloc: cast incompatible");
    casted = b.create<memref::CastOp>(loc, memRefType, allocated.getValue());
  }

  if (deallocMemref) {
    // 2. Create memory deallocation.
    b.setInsertionPoint(allocated.getValue().getParentBlock()->getTerminator());
    if (failed(createDealloc(b, loc, allocated.getValue(), options)))
      return failure();
  }

  return casted;
}

/// Create a memref allocation with the given type and dynamic extents.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, MemRefType type,
                           ValueRange dynShape,
                           const BufferizationOptions &options) {
  if (options.allocationFn)
    return (*options.allocationFn)(b, loc, type, dynShape,
                                   options.bufferAlignment);

  // Default buffer allocation via AllocOp.
  Value allocated = b.create<memref::AllocOp>(
      loc, type, dynShape, b.getI64IntegerAttr(options.bufferAlignment));
  return allocated;
}

/// Create a memref allocation with the given type and dynamic extents. If
/// `deallocMemref` is set, also create a deallocation at the end of the block.
FailureOr<Value>
bufferization::createAlloc(OpBuilder &b, Location loc, MemRefType type,
                           ValueRange dynShape, bool deallocMemref,
                           const BufferizationOptions &options) {
  OpBuilder::InsertionGuard g(b);

  FailureOr<Value> alloc = createAlloc(b, loc, type, dynShape, options);
  if (failed(alloc))
    return failure();

  if (deallocMemref) {
    // Dealloc at the end of the block.
    b.setInsertionPoint(alloc.getValue().getParentBlock()->getTerminator());
    if (failed(createDealloc(b, loc, *alloc, options)))
      return failure();
  }

  return alloc;
}

/// Create a memref deallocation.
LogicalResult
bufferization::createDealloc(OpBuilder &b, Location loc, Value allocatedBuffer,
                             const BufferizationOptions &options) {
  if (options.deallocationFn)
    return (*options.deallocationFn)(b, loc, allocatedBuffer);

  // Default buffer deallocation via DeallocOp.
  b.create<memref::DeallocOp>(loc, allocatedBuffer);
  return success();
}

/// Create a memory copy between two memref buffers.
LogicalResult bufferization::createMemCpy(OpBuilder &b, Location loc,
                                          Value from, Value to,
                                          const BufferizationOptions &options) {
  if (options.memCpyFn)
    return (*options.memCpyFn)(b, loc, from, to);

  b.create<memref::CopyOp>(loc, from, to);
  return success();
}

//===----------------------------------------------------------------------===//
// Bufferization-specific type and function-argument helpers.
//===----------------------------------------------------------------------===//

bool bufferization::isFunctionArgument(Value value) {
  auto bbArg = value.dyn_cast<BlockArgument>();
  if (!bbArg)
    return false;
  return isa<FuncOp>(bbArg.getOwner()->getParentOp());
}

MemRefType bufferization::getContiguousMemRefType(ShapedType shapedType,
                                                  Attribute memorySpace) {
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(shapedType.getShape(), shapedType.getElementType(),
                         layout, memorySpace);
}

BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType = tensorType.dyn_cast<UnrankedTensorType>()) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout. If fully dynamic layout
  // maps are not requested, generate a type with `layout`, which is empty (no
  // layout map) by default.
  auto rankedTensorType = tensorType.cast<RankedTensorType>();
  if (layout || !options.fullyDynamicLayoutMaps) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  // Case 3: Ranked memref type with unspecified layout. Choose the most dynamic
  // one.
  // TODO: address space decisions to connect with the actual alloc.
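  // For a ranked tensor, this results in a memref type with a fully dynamic
  // strided layout map: a dynamic offset and a dynamic stride per dimension.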
  int64_t dynamicOffset = ShapedType::kDynamicStrideOrOffset;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamicStrideOrOffset);
  AffineMap stridedLayout = makeStridedLinearLayoutMap(
      dynamicStrides, dynamicOffset, rankedTensorType.getContext());
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}