1 //===- LinalgTransforms.cpp - Linalg transformations as patterns ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements logic and helpers to expose Linalg transforms as rewrite
10 // patterns.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
15 #include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
16 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
17 #include "mlir/Dialect/Linalg/Utils/Utils.h"
18 #include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
19 #include "mlir/Dialect/Utils/StructuredOpsUtils.h"
20 #include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
21 #include "mlir/Dialect/Vector/VectorOps.h"
22 #include "mlir/IR/AffineExpr.h"
23 #include "mlir/IR/Matchers.h"
24 #include "mlir/Pass/Pass.h"
25 #include "mlir/Support/LLVM.h"
26 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <type_traits>
30 
31 #define DEBUG_TYPE "linalg-transforms"
32 
33 using namespace mlir;
34 using namespace mlir::edsc;
35 using namespace mlir::edsc::intrinsics;
36 using namespace mlir::linalg;
37 
38 #define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")
39 
40 //===----------------------------------------------------------------------===//
41 // Transformations exposed as rewrite patterns.
42 //===----------------------------------------------------------------------===//
// Marker used as attribute name in generated Linalg rewriting transformations.
// Patterns read this StringAttr (LinalgMarker::checkAndNotify) and rewrite or
// remove it (LinalgMarker::replaceLinalgMarker) to chain staged transforms.
const StringLiteral mlir::linalg::LinalgTransforms::kLinalgTransformMarker =
    "__internal_linalg_transform__";
46 
/// Construct a LinalgMarker matching any marker in `matchDisjunction` (an
/// empty list matches unmarked ops — see checkAndNotify) and carrying the
/// optional `replacement` marker applied by replaceLinalgMarker.
mlir::linalg::LinalgMarker::LinalgMarker(ArrayRef<Identifier> matchDisjunction,
                                         Optional<Identifier> replacement)
    : matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement) {}
51 
52 LogicalResult
53 mlir::linalg::LinalgMarker::checkAndNotify(PatternRewriter &rewriter,
54                                            Operation *op) const {
55   auto attr = op->template getAttrOfType<StringAttr>(
56       LinalgTransforms::kLinalgTransformMarker);
57 
58   if (!attr) {
59     // 1. Has no marker case and matchDisjunction is empty.
60     if (matchDisjunction.empty())
61       return success();
62 
63     // 2. Has no marker but was expecting a marker.
64     return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
65       diag << " does not have any marker from list: ";
66       interleaveComma(matchDisjunction, diag);
67     });
68   }
69 
70   // 4. Match explicit marker.
71   for (auto marker : matchDisjunction)
72     if (attr.getValue() == marker)
73       return success();
74 
75   // 5. Fail to match.
76   return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
77     diag << " does not have any marker from list: ";
78     interleaveComma(matchDisjunction, diag);
79   });
80 }
81 
82 void mlir::linalg::LinalgMarker::replaceLinalgMarker(PatternRewriter &rewriter,
83                                                      Operation *op) const {
84   if (replacement.hasValue())
85     op->setAttr(LinalgTransforms::kLinalgTransformMarker,
86                 rewriter.getStringAttr(replacement.getValue()));
87   else
88     op->removeAttr(Identifier::get(LinalgTransforms::kLinalgTransformMarker,
89                                    rewriter.getContext()));
90 }
91 
92 LinalgTilingOptions &
93 mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
94   SmallVector<int64_t, 4> tileSizes(ts.begin(), ts.end());
95   tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
96     OpBuilder::InsertionGuard guard(b);
97     b.setInsertionPointToStart(
98         &op->getParentOfType<FuncOp>().getBody().front());
99     return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
100       Value v = b.create<ConstantIndexOp>(op->getLoc(), s);
101       return v;
102     }));
103   };
104   return *this;
105 }
106 
/// Linalg base tiling pattern.
/// Matches ops with the given `opName`, gated by `marker`; tiling behavior is
/// configured through `options`.
mlir::linalg::LinalgBaseTilingPattern::LinalgBaseTilingPattern(
    StringRef opName, MLIRContext *context, LinalgTilingOptions options,
    LinalgMarker marker, PatternBenefit benefit)
    : RewritePattern(opName, {}, benefit, context), marker(marker),
      options(options) {}
113 
114 LogicalResult mlir::linalg::LinalgBaseTilingPattern::matchAndRewriteBase(
115     Operation *op, PatternRewriter &rewriter,
116     SmallVectorImpl<Value> &tensorResults) const {
117   LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
118   if (!linalgOp)
119     return failure();
120   if (failed(marker.checkAndNotify(rewriter, linalgOp)))
121     return failure();
122 
123   // If LinalgOp has results, they must all be tied to init tensors.
124   // We enforce this to ensure all tiled ops have been rewritten in
125   // "init tensor" form. This ensures tiling has anchor values into which to
126   // subtensor / subtensor_insert. Otherwise tiling would need to allocate which
127   // is not acceptable.
128   // This would not be the case with a special terminator op that generates the
129   // whole tensor (instead of inserting a subtensor). But the generator-based
130   // abstraction has other issues.
131   if (linalgOp.getNumInitTensors() != linalgOp.getOperation()->getNumResults())
132     return failure();
133 
134   Optional<TiledLinalgOp> res = tileLinalgOp(rewriter, linalgOp, options);
135 
136   if (!res)
137     return failure();
138 
139   // Return relevant information to derived pattern.
140   tensorResults = res->tensorResults;
141 
142   // New marker if specified.
143   marker.replaceLinalgMarker(rewriter, res->op.getOperation());
144   return success();
145 }
146 
/// Linalg base tile-and-fuse pattern.
/// Stores the dependence graph plus tiling/fusion options, and three markers:
/// `marker` gates matching and marks the tiled root, `fusedOpMarker` marks
/// the fused producers, `originalOpMarker` marks the untouched originals.
mlir::linalg::LinalgBaseTileAndFusePattern::LinalgBaseTileAndFusePattern(
    StringRef opName, MLIRContext *context,
    const LinalgDependenceGraph &dependenceGraph,
    LinalgTilingOptions tilingOptions, LinalgFusionOptions fusionOptions,
    LinalgMarker marker, LinalgMarker fusedOpMarker,
    LinalgMarker originalOpMarker, PatternBenefit benefit)
    : RewritePattern(opName, {}, benefit, context),
      dependenceGraph(dependenceGraph), tilingOptions(tilingOptions),
      fusionOptions(fusionOptions), marker(marker),
      fusedOpMarker(fusedOpMarker), originalOpMarker(originalOpMarker) {}
157 
/// Tile `op`, fuse the producers selected by `fusionOptions.indicesToFuse`
/// into the tiled loops, then tile the remaining (unfused) loops.
LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  if (!linalgOp)
    return failure();
  if (failed(marker.checkAndNotify(rewriter, linalgOp)))
    return failure();
  // Only ops with buffer semantics are handled by this pattern.
  if (!linalgOp.hasBufferSemantics())
    return failure();

  // Collect the LinalgOp producers reached through the operand indices the
  // fusion options selected.
  DenseSet<Operation *> producers;
  producers.insert(linalgOp);
  for (auto dependence : dependenceGraph.getDependentOperations(linalgOp)) {
    if (!fusionOptions.indicesToFuse.count(
            dependence.indexingOpView.operandIndex))
      continue;
    if (isa<LinalgOp>(dependence.dependentOpView.op))
      producers.insert(dependence.dependentOpView.op);
  }

  // Gather the selected producers in block order (walking the ops that
  // precede `op` in its block), with `op` itself appended last.
  SmallVector<LinalgOp, 1> fusionOps;
  for (auto it = op->getBlock()->begin(), ie = Block::iterator(op); it != ie;
       ++it) {
    auto producerLinalgOp = dyn_cast<LinalgOp>(&(*it));
    if (producerLinalgOp && producers.count(producerLinalgOp))
      fusionOps.push_back(producerLinalgOp);
  }
  fusionOps.push_back(linalgOp);

  // Materialize the tile sizes once so the fused and unfused tiling steps
  // below agree on the same SSA values.
  SmallVector<Value, 4> tileSizes =
      tilingOptions.tileSizeComputationFunction(rewriter, op);
  LinalgTilingOptions instanceTilingOptions = tilingOptions;
  instanceTilingOptions.setTileSizes(tileSizes);
  Optional<TiledAndFusedLinalgOps> tiledAndFusedOps = tileAndFuseLinalgOps(
      rewriter, fusionOps, dependenceGraph, instanceTilingOptions);
  if (!tiledAndFusedOps)
    return failure();

  // Tile the unfused loops: keep the requested tile size for loops that were
  // not fused, and use 0 (meaning "do not tile") for the fused loop dims.
  SmallVector<Value, 4> unfusedLoopTileSizes;
  Value zero = rewriter.create<ConstantIndexOp>(op->getLoc(), 0);
  for (auto tileSize : enumerate(tileSizes)) {
    if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index()))
      unfusedLoopTileSizes.push_back(zero);
    else
      unfusedLoopTileSizes.push_back(tileSize.value());
  }
  // Tile the loop only if there is a non-zero tile size. First trim any extra
  // sizes beyond the op's loop count.
  if (unfusedLoopTileSizes.size() > linalgOp.getNumLoops())
    unfusedLoopTileSizes.resize(linalgOp.getNumLoops());
  if (llvm::any_of(unfusedLoopTileSizes, [](Value val) {
        if (auto cst = val.getDefiningOp<ConstantIndexOp>())
          return cst.getValue() != 0;
        return true;
      })) {
    LinalgTilingOptions unfusedTilingOptions = tilingOptions;
    unfusedTilingOptions.setTileSizes(unfusedLoopTileSizes);
    Optional<TiledLinalgOp> unfusedTiledOp =
        tileLinalgOp(rewriter, tiledAndFusedOps->op, unfusedTilingOptions);
    if (!unfusedTiledOp)
      return failure();
    // Replace the partially tiled op with its fully tiled version.
    rewriter.eraseOp(tiledAndFusedOps->op);
    tiledAndFusedOps->op = unfusedTiledOp->op;
  }

  // Apply the configured markers: tiled root, fused producers, and the
  // original producers (which stay in the IR).
  marker.replaceLinalgMarker(rewriter, tiledAndFusedOps->op.getOperation());
  for (auto fusedOp : tiledAndFusedOps->fusedProducers) {
    fusedOpMarker.replaceLinalgMarker(rewriter, fusedOp.getOperation());
  }
  for (auto origProducerOp : ArrayRef<LinalgOp>(fusionOps).drop_back()) {
    originalOpMarker.replaceLinalgMarker(rewriter,
                                         origProducerOp.getOperation());
  }
  // Marker change on the root op is an in-place update and must be announced
  // to the rewriter.
  rewriter.updateRootInPlace(
      op, [&]() { originalOpMarker.replaceLinalgMarker(rewriter, op); });
  return success();
}
235 
/// Linalg base interchange pattern.
/// Matches ops named `opName`, gated by `marker`; `interchangeVector` is the
/// loop permutation applied by the rewrite.
mlir::linalg::LinalgBaseInterchangePattern::LinalgBaseInterchangePattern(
    StringRef opName, MLIRContext *context,
    ArrayRef<unsigned> interchangeVector, LinalgMarker marker,
    PatternBenefit benefit)
    : RewritePattern(opName, {}, benefit, context), marker(marker),
      interchangeVector(interchangeVector.begin(), interchangeVector.end()) {}
243 
244 LogicalResult mlir::linalg::LinalgBaseInterchangePattern::matchAndRewrite(
245     Operation *op, PatternRewriter &rewriter) const {
246   LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
247   if (!linalgOp)
248     return failure();
249   if (failed(marker.checkAndNotify(rewriter, linalgOp)))
250     return failure();
251   if (failed(interchangeGenericLinalgOpPrecondition(op, interchangeVector)))
252     return failure();
253 
254   // TODO: figure out how this interplays with named ops. In particular this
255   // should break the named op property.
256   rewriter.updateRootInPlace(op, [&]() {
257     interchange(linalgOp, interchangeVector);
258     // New marker if specified.
259     marker.replaceLinalgMarker(rewriter, op);
260   });
261   return success();
262 }
263 
/// Linalg base promotion pattern.
/// Matches ops named `opName`, gated by `marker`; subview promotion behavior
/// is configured through `options`.
mlir::linalg::LinalgBasePromotionPattern::LinalgBasePromotionPattern(
    StringRef opName, MLIRContext *context, LinalgPromotionOptions options,
    LinalgMarker marker, PatternBenefit benefit)
    : RewritePattern(opName, {}, benefit, context), marker(marker),
      options(options) {}
269 
/// Promote the subviews of a matching op per `options`, notifying the
/// rewriter of the in-place update.
LogicalResult mlir::linalg::LinalgBasePromotionPattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  if (failed(marker.checkAndNotify(rewriter, op)))
    return failure();
  if (failed(promoteSubviewsPrecondition(op, options)))
    return failure();

  // TODO: We cannot use root update here. This pattern is creating other ops,
  // so if the promotion fails, those need to be cleaned up, which doesn't seem
  // to be happening here. So to fail properly, we should be cloning the op and
  // deleting the previous op. This needs more investigation.
  rewriter.startRootUpdate(op);
  Optional<LinalgOp> promotedOp = promoteSubViews(rewriter, op, options);
  if (!promotedOp) {
    // Roll back the pending root update before reporting the failure.
    rewriter.cancelRootUpdate(op);
    return op->emitError("subview promotion failed");
  }
  rewriter.finalizeRootUpdate(op);
  marker.replaceLinalgMarker(rewriter, op);
  return success();
}
291 
/// Linalg base vectorization pattern.
/// Matches ops named `opName`, gated by `marker`.
mlir::linalg::LinalgBaseVectorizationPattern::LinalgBaseVectorizationPattern(
    StringRef opName, MLIRContext *context, LinalgMarker marker,
    PatternBenefit benefit)
    : RewritePattern(opName, {}, benefit, context), marker(marker) {}
296 
297 LogicalResult mlir::linalg::LinalgBaseVectorizationPattern::matchAndRewrite(
298     Operation *op, PatternRewriter &rewriter) const {
299   LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
300   if (!linalgOp)
301     return failure();
302   if (failed(marker.checkAndNotify(rewriter, linalgOp)))
303     return failure();
304   if (failed(vectorizeLinalgOpPrecondition(op)))
305     return failure();
306   vectorizeLinalgOp(rewriter, op);
307   rewriter.eraseOp(op);
308   return success();
309 }
310 
311 LogicalResult mlir::linalg::applyStagedPatterns(
312     Operation *op, ArrayRef<FrozenRewritePatternList> stage1Patterns,
313     const FrozenRewritePatternList &stage2Patterns,
314     function_ref<LogicalResult(Operation *)> stage3Lambda) {
315   unsigned iteration = 0;
316   (void)iteration;
317   for (const auto &patterns : stage1Patterns) {
318     LLVM_DEBUG(DBGS() << "Before 1st stage, iter: " << ++iteration << "\n"
319                       << *op);
320     if (failed(applyPatternsAndFoldGreedily(op, patterns))) {
321       LLVM_DEBUG(DBGS() << "Underlying first stage rewrite did not converge");
322       return failure();
323     }
324     LLVM_DEBUG(DBGS() << "After 1st stage, iter: " << ++iteration << "\n"
325                       << *op);
326     if (failed(applyPatternsAndFoldGreedily(op, stage2Patterns))) {
327       LLVM_DEBUG(DBGS() << "Underlying 2nd stage rewrite did not converge");
328       return failure();
329     }
330     LLVM_DEBUG(DBGS() << "After 2nd stage, iter : " << iteration << "\n"
331                       << *op);
332     if (stage3Lambda) {
333       if (failed(stage3Lambda(op)))
334         return failure();
335       LLVM_DEBUG(DBGS() << "After 3rd stage, iter : " << iteration << "\n"
336                         << *op);
337     }
338   }
339   return success();
340 }
341 
/// Traverse `e` and return an AffineExpr where all occurrences of `dim` have
/// been replaced by either:
///  - `min` if `positivePath` is true when we reach an occurrence of `dim`
///  - `max` if `positivePath` is false when we reach an occurrence of `dim`
/// `positivePath` is negated each time we hit a multiplicative or divisive
/// binary op with a constant negative coefficient.
static AffineExpr substWithMin(AffineExpr e, AffineExpr dim, AffineExpr min,
                               AffineExpr max, bool positivePath = true) {
  if (e == dim)
    return positivePath ? min : max;
  if (auto bin = e.dyn_cast<AffineBinaryOpExpr>()) {
    AffineExpr lhs = bin.getLHS();
    AffineExpr rhs = bin.getRHS();
    // Addition preserves the sign of each operand: recurse with the same
    // path.
    if (bin.getKind() == mlir::AffineExprKind::Add)
      return substWithMin(lhs, dim, min, max, positivePath) +
             substWithMin(rhs, dim, min, max, positivePath);

    // Non-add binary op (mul, div, mod, ...): a negative constant on either
    // side flips the sign of the other side, so recurse with the path
    // negated.
    auto c1 = bin.getLHS().dyn_cast<AffineConstantExpr>();
    auto c2 = bin.getRHS().dyn_cast<AffineConstantExpr>();
    if (c1 && c1.getValue() < 0)
      return getAffineBinaryOpExpr(
          bin.getKind(), c1, substWithMin(rhs, dim, min, max, !positivePath));
    if (c2 && c2.getValue() < 0)
      return getAffineBinaryOpExpr(
          bin.getKind(), substWithMin(lhs, dim, min, max, !positivePath), c2);
    return getAffineBinaryOpExpr(
        bin.getKind(), substWithMin(lhs, dim, min, max, positivePath),
        substWithMin(rhs, dim, min, max, positivePath));
  }
  // Leaf expression other than `dim`: left untouched.
  return e;
}
373 
374 /// Given the `lbVal`, `ubVal` and `stepVal` of a loop, append `lbVal` and
375 /// `ubVal` to `dims` and `stepVal` to `symbols`.
376 /// Create new AffineDimExpr (`%lb` and `%ub`) and AffineSymbolExpr (`%step`)
377 /// with positions matching the newly appended values. Substitute occurrences of
378 /// `dimExpr` by either the min expression (i.e. `%lb`) or the max expression
379 /// (i.e. `%lb + %step * floordiv(%ub -1 - %lb, %step)`), depending on whether
380 /// the induction variable is used with a positive or negative  coefficient.
381 static AffineExpr substituteLoopInExpr(AffineExpr expr, AffineExpr dimExpr,
382                                        Value lbVal, Value ubVal, Value stepVal,
383                                        SmallVectorImpl<Value> &dims,
384                                        SmallVectorImpl<Value> &symbols) {
385   MLIRContext *ctx = lbVal.getContext();
386   AffineExpr lb = getAffineDimExpr(dims.size(), ctx);
387   dims.push_back(lbVal);
388   AffineExpr ub = getAffineDimExpr(dims.size(), ctx);
389   dims.push_back(ubVal);
390   AffineExpr step = getAffineSymbolExpr(symbols.size(), ctx);
391   symbols.push_back(stepVal);
392   LLVM_DEBUG(DBGS() << "Before: " << expr << "\n");
393   AffineExpr ee = substWithMin(expr, dimExpr, lb,
394                                lb + step * ((ub - 1) - lb).floorDiv(step));
395   LLVM_DEBUG(DBGS() << "After: " << expr << "\n");
396   return ee;
397 }
398 
/// Traverse the `dims` and substitute known min or max expressions in place of
/// induction variables in `exprs`. `dims` and `symbols` are extended in place
/// with the loop bounds/steps pulled in by the substitution, then compacted by
/// canonicalization; both are updated for the caller.
static AffineMap substitute(AffineMap map, SmallVectorImpl<Value> &dims,
                            SmallVectorImpl<Value> &symbols) {
  auto exprs = llvm::to_vector<4>(map.getResults());
  for (AffineExpr &expr : exprs) {
    // Iterate to a fixed point: each substitution may introduce new dims
    // (loop bounds) that are themselves loop induction variables.
    bool substituted = true;
    while (substituted) {
      substituted = false;
      for (unsigned dimIdx = 0; dimIdx < dims.size(); ++dimIdx) {
        Value dim = dims[dimIdx];
        AffineExpr dimExpr = getAffineDimExpr(dimIdx, expr.getContext());
        LLVM_DEBUG(DBGS() << "Subst: " << dim << " @ " << dimExpr << "\n");
        AffineExpr substitutedExpr;
        // scf.for induction variable: bounds and step come off the op.
        if (auto forOp = scf::getForInductionVarOwner(dim))
          substitutedExpr = substituteLoopInExpr(
              expr, dimExpr, forOp.lowerBound(), forOp.upperBound(),
              forOp.step(), dims, symbols);

        // scf.parallel induction variable.
        // NOTE(review): this substitutes `dimExpr` once per parallel loop
        // dimension and keeps only the last result — presumably only the
        // dimension that actually owns `dim` changes `expr`; verify.
        if (auto parallelForOp = scf::getParallelForInductionVarOwner(dim))
          for (unsigned idx = 0, e = parallelForOp.getNumLoops(); idx < e;
               ++idx)
            substitutedExpr = substituteLoopInExpr(
                expr, dimExpr, parallelForOp.lowerBound()[idx],
                parallelForOp.upperBound()[idx], parallelForOp.step()[idx],
                dims, symbols);

        // `dim` is not a loop induction variable: nothing substituted.
        if (!substitutedExpr)
          continue;

        substituted = (substitutedExpr != expr);
        expr = substitutedExpr;
      }
    }

    // Cleanup and simplify the results.
    // This needs to happen outside of the loop iterating on dims.size() since
    // it modifies dims.
    SmallVector<Value, 4> operands(dims.begin(), dims.end());
    operands.append(symbols.begin(), symbols.end());
    // Note: this local `map` deliberately shadows the `map` parameter; the
    // final return statement below refers to the parameter again.
    auto map = AffineMap::get(dims.size(), symbols.size(), exprs,
                              exprs.front().getContext());

    LLVM_DEBUG(DBGS() << "Map to simplify: " << map << "\n");

    // Pull in affine.apply operations and compose them fully into the
    // result.
    fullyComposeAffineMapAndOperands(&map, &operands);
    canonicalizeMapAndOperands(&map, &operands);
    map = simplifyAffineMap(map);
    // Assign the results; the canonicalized operand list is split back into
    // dims and symbols for the caller.
    exprs.assign(map.getResults().begin(), map.getResults().end());
    dims.assign(operands.begin(), operands.begin() + map.getNumDims());
    symbols.assign(operands.begin() + map.getNumDims(), operands.end());

    LLVM_DEBUG(DBGS() << "Map simplified: " << map << "\n");
  }

  assert(!exprs.empty() && "Unexpected empty exprs");
  return AffineMap::get(dims.size(), symbols.size(), exprs, map.getContext());
}
460 
461 LogicalResult AffineMinSCFCanonicalizationPattern::matchAndRewrite(
462     AffineMinOp minOp, PatternRewriter &rewriter) const {
463   LLVM_DEBUG(DBGS() << "Canonicalize AffineMinSCF: " << *minOp.getOperation()
464                     << "\n");
465 
466   SmallVector<Value, 4> dims(minOp.getDimOperands()),
467       symbols(minOp.getSymbolOperands());
468   AffineMap map = substitute(minOp.getAffineMap(), dims, symbols);
469 
470   LLVM_DEBUG(DBGS() << "Resulting map: " << map << "\n");
471 
472   // Check whether any of the expressions, when subtracted from all other
473   // expressions, produces only >= 0 constants. If so, it is the min.
474   for (auto e : minOp.getAffineMap().getResults()) {
475     LLVM_DEBUG(DBGS() << "Candidate min: " << e << "\n");
476     if (!e.isSymbolicOrConstant())
477       continue;
478 
479     auto isNonPositive = [](AffineExpr e) {
480       if (auto cst = e.dyn_cast<AffineConstantExpr>())
481         return cst.getValue() < 0;
482       return true;
483     };
484 
485     // Build the subMap and check everything is statically known to be
486     // positive.
487     SmallVector<AffineExpr, 4> subExprs;
488     subExprs.reserve(map.getNumResults());
489     for (auto ee : map.getResults())
490       subExprs.push_back(ee - e);
491     MLIRContext *ctx = minOp.getContext();
492     AffineMap subMap = simplifyAffineMap(
493         AffineMap::get(map.getNumDims(), map.getNumSymbols(), subExprs, ctx));
494     LLVM_DEBUG(DBGS() << "simplified subMap: " << subMap << "\n");
495     if (llvm::any_of(subMap.getResults(), isNonPositive))
496       continue;
497 
498     // Static min found.
499     if (auto cst = e.dyn_cast<AffineConstantExpr>()) {
500       rewriter.replaceOpWithNewOp<ConstantIndexOp>(minOp, cst.getValue());
501     } else {
502       auto resultMap = AffineMap::get(0, map.getNumSymbols(), {e}, ctx);
503       SmallVector<Value, 4> resultOperands = dims;
504       resultOperands.append(symbols.begin(), symbols.end());
505       canonicalizeMapAndOperands(&resultMap, &resultOperands);
506       resultMap = simplifyAffineMap(resultMap);
507       rewriter.replaceOpWithNewOp<AffineApplyOp>(minOp, resultMap,
508                                                  resultOperands);
509     }
510     return success();
511   }
512 
513   return failure();
514 }
515