Lines matching refs: codegen
423 static Value genVectorReducInit(CodeGen &codegen, OpBuilder &builder, in genVectorReducInit() argument
425 Value r = codegen.redVal; in genVectorReducInit()
426 switch (codegen.redKind) { in genVectorReducInit()
448 static Value genVectorReducEnd(CodeGen &codegen, OpBuilder &builder, in genVectorReducEnd() argument
450 vector::CombiningKind kind = getCombiningKind(codegen.redKind); in genVectorReducEnd()
451 return builder.create<vector::ReductionOp>(loc, kind, codegen.redVal); in genVectorReducEnd()
455 static void updateReduc(Merger &merger, CodeGen &codegen, Value reduc) { in updateReduc() argument
456 assert(codegen.redKind != kNoReduc); in updateReduc()
457 codegen.redVal = merger.exp(codegen.redExp).val = reduc; in updateReduc()
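The reduction helpers above seed and drain the vector accumulator: genVectorReducInit builds a vector whose lanes hold the reduction identity with the incoming scalar in one lane, genVectorReducEnd folds the lanes back into a scalar with vector.reduction, and updateReduc keeps codegen.redVal and the Merger expression in sync. A rough plain-C++ sketch of the same idea (VL and the function name are hypothetical; this is not the MLIR builder code):

  #include <array>
  #include <cstddef>

  constexpr std::size_t VL = 4; // hypothetical vector length

  double vectorizedSum(const double *x, std::size_t n, double init) {
    std::array<double, VL> acc{}; // identity (0.0) in every lane...
    acc[0] = init;                // ...with the incoming scalar in lane 0 (genVectorReducInit)
    std::size_t i = 0;
    for (; i + VL <= n; i += VL)  // lane-wise accumulation inside the vectorized loop
      for (std::size_t l = 0; l < VL; ++l)
        acc[l] += x[i + l];
    double red = 0.0;             // horizontal fold, like vector.reduction (genVectorReducEnd)
    for (double a : acc)
      red += a;
    for (; i < n; ++i)            // scalar tail
      red += x[i];
    return red;
  }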
470 static Value genOutputBuffer(CodeGen &codegen, OpBuilder &builder, in genOutputBuffer() argument
495 static void genBuffers(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genBuffers() argument
523 codegen.pointers[tensor][idx] = in genBuffers()
525 codegen.indices[tensor][idx] = in genBuffers()
533 assert(codegen.highs[tensor][idx] == nullptr); in genBuffers()
534 codegen.sizes[idx] = codegen.highs[tensor][idx] = up; in genBuffers()
545 codegen.buffers[tensor] = in genBuffers()
548 codegen.buffers[tensor] = in genBuffers()
549 genOutputBuffer(codegen, builder, op, denseTp, args); in genBuffers()
550 } else if (t == codegen.sparseOut) { in genBuffers()
555 codegen.lexIdx = builder.create<memref::AllocaOp>(loc, memTp, rank); in genBuffers()
556 codegen.lexVal = builder.create<memref::AllocaOp>( in genBuffers()
562 codegen.buffers[tensor] = in genBuffers()
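genBuffers materializes, per compressed dimension, the pointers and indices arrays plus a values buffer for each tensor, a dense output buffer via genOutputBuffer, and for a sparse output the lexIdx/lexVal scratch used by lexicographic insertion. A rough sketch of the storage model these buffers correspond to for a CSR matrix (names hypothetical):

  #include <cstdint>
  #include <vector>

  // One compressed dimension of a sparse tensor, CSR-style.
  struct CSRMatrix {
    std::vector<std::uint64_t> pointers; // row i owns positions [pointers[i], pointers[i+1])
    std::vector<std::uint64_t> indices;  // column coordinate of each stored entry
    std::vector<double> values;          // value of each stored entry
    std::uint64_t numRows = 0, numCols = 0;
  };

  // A dense operand or output is simply a flat values array in codegen.buffers[tensor];
  // a sparse output additionally gets the lexIdx/lexVal scratch for insertion.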
569 static VectorType vectorType(CodeGen &codegen, Type etp) { in vectorType() argument
570 unsigned numScalableDims = codegen.options.enableVLAVectorization; in vectorType()
571 return VectorType::get(codegen.curVecLength, etp, numScalableDims); in vectorType()
575 static VectorType vectorType(CodeGen &codegen, Value ptr) { in vectorType() argument
576 return vectorType(codegen, ptr.getType().cast<MemRefType>().getElementType()); in vectorType()
580 static Value genVectorMask(CodeGen &codegen, OpBuilder &builder, Value iv, in genVectorMask() argument
583 VectorType mtp = vectorType(codegen, builder.getI1Type()); in genVectorMask()
611 static Value genVectorLoad(CodeGen &codegen, OpBuilder &builder, Value ptr, in genVectorLoad() argument
614 VectorType vtp = vectorType(codegen, ptr); in genVectorLoad()
621 codegen.curVecMask, pass); in genVectorLoad()
624 codegen.curVecMask, pass); in genVectorLoad()
628 static void genVectorStore(CodeGen &codegen, OpBuilder &builder, Value rhs, in genVectorStore() argument
636 codegen.curVecMask, rhs); in genVectorStore()
639 builder.create<vector::MaskedStoreOp>(loc, ptr, args, codegen.curVecMask, in genVectorStore()
645 static Value genVectorInvariantValue(CodeGen &codegen, OpBuilder &builder, in genVectorInvariantValue() argument
647 VectorType vtp = vectorType(codegen, val.getType()); in genVectorInvariantValue()
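genVectorMask builds an i1 mask for the loop tail, and genVectorLoad/genVectorStore emit masked (or gathered, when an index vector is present) memory operations guarded by codegen.curVecMask. The lane-wise semantics, sketched in plain C++ (hypothetical names; the actual ops are vector.maskedload / vector.maskedstore):

  #include <array>
  #include <cstddef>

  constexpr std::size_t VL = 4;

  // Masked load: inactive lanes keep the pass-through value instead of touching memory.
  std::array<double, VL> maskedLoad(const double *ptr, std::array<bool, VL> mask,
                                    std::array<double, VL> passThru) {
    std::array<double, VL> out{};
    for (std::size_t l = 0; l < VL; ++l)
      out[l] = mask[l] ? ptr[l] : passThru[l];
    return out;
  }

  // Masked store: inactive lanes leave memory untouched.
  void maskedStore(double *ptr, std::array<bool, VL> mask,
                   std::array<double, VL> value) {
    for (std::size_t l = 0; l < VL; ++l)
      if (mask[l])
        ptr[l] = value[l];
  }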
655 static Value genAffine(CodeGen &codegen, OpBuilder &builder, AffineExpr a, in genAffine() argument
660 return codegen.loops[idx]; // universal dense index in genAffine()
665 loc, genAffine(codegen, builder, binOp.getLHS(), loc), in genAffine()
666 genAffine(codegen, builder, binOp.getRHS(), loc)); in genAffine()
671 loc, genAffine(codegen, builder, binOp.getLHS(), loc), in genAffine()
672 genAffine(codegen, builder, binOp.getRHS(), loc)); in genAffine()
684 static Value genIndex(CodeGen &codegen, linalg::GenericOp op, OpOperand *t) { in genIndex() argument
690 return codegen.loops[idx]; in genIndex()
694 static Value genSubscript(CodeGen &codegen, OpBuilder &builder, in genSubscript() argument
707 assert(codegen.pidxs[tensor][idx] != nullptr); in genSubscript()
708 args.push_back(codegen.pidxs[tensor][idx]); // position index in genSubscript()
712 args.push_back(genAffine(codegen, builder, a, op.getLoc())); in genSubscript()
715 return codegen.buffers[tensor]; in genSubscript()
719 static Value genInsertionLoad(CodeGen &codegen, OpBuilder &builder, in genInsertionLoad() argument
723 if (!codegen.expValues) { in genInsertionLoad()
728 Value index = genIndex(codegen, op, t); in genInsertionLoad()
729 return builder.create<memref::LoadOp>(loc, codegen.expValues, index); in genInsertionLoad()
733 static void genInsertionStore(CodeGen &codegen, OpBuilder &builder, in genInsertionStore() argument
737 if (!codegen.expValues) { in genInsertionStore()
738 builder.create<memref::StoreOp>(loc, rhs, codegen.lexVal); in genInsertionStore()
739 builder.create<LexInsertOp>(loc, t->get(), codegen.lexIdx, codegen.lexVal); in genInsertionStore()
748 Value index = genIndex(codegen, op, t); in genInsertionStore()
752 Value filled = builder.create<memref::LoadOp>(loc, codegen.expFilled, index); in genInsertionStore()
759 builder.create<memref::StoreOp>(loc, tval, codegen.expFilled, index); in genInsertionStore()
760 builder.create<memref::StoreOp>(loc, index, codegen.expAdded, in genInsertionStore()
761 codegen.expCount); in genInsertionStore()
763 Value add = builder.create<arith::AddIOp>(loc, codegen.expCount, one); in genInsertionStore()
767 builder.create<scf::YieldOp>(loc, codegen.expCount); in genInsertionStore()
770 codegen.expCount = ifOp.getResult(0); in genInsertionStore()
771 builder.create<memref::StoreOp>(loc, rhs, codegen.expValues, index); in genInsertionStore()
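For a sparse output tensor, genInsertionLoad/genInsertionStore either take the lexicographic path (lexIdx/lexVal plus LexInsertOp) or, once the expanded access pattern is active, read and write the dense scratch row expValues while tracking touched entries in expFilled/expAdded/expCount. Roughly, the bookkeeping the listed stores implement (plain C++, hypothetical names):

  #include <cstdint>
  #include <vector>

  // Expanded access pattern for one output row: a dense scratch row plus bookkeeping.
  struct ExpandedRow {
    std::vector<double> values;        // expValues: dense scratch, one slot per column
    std::vector<bool> filled;          // expFilled: has this column been written yet?
    std::vector<std::uint64_t> added;  // expAdded: columns written, in visit order
    std::uint64_t count = 0;           // expCount: number of columns written so far
  };

  // What genInsertionStore emits around each store into the sparse output.
  void insertionStore(ExpandedRow &row, std::uint64_t col, double rhs) {
    if (!row.filled[col]) {            // first write to this column in the current row
      row.filled[col] = true;
      row.added[row.count++] = col;    // remember it for the later compress step
    }
    row.values[col] = rhs;
  }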
775 static Value genTensorLoad(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genTensorLoad() argument
780 if (codegen.curVecLength > 1 && !val.getType().isa<VectorType>()) in genTensorLoad()
781 return genVectorInvariantValue(codegen, builder, val); in genTensorLoad()
786 if (t == codegen.sparseOut) in genTensorLoad()
787 return genInsertionLoad(codegen, builder, op, t); in genTensorLoad()
790 Value ptr = genSubscript(codegen, builder, op, t, args); in genTensorLoad()
791 if (codegen.curVecLength > 1) in genTensorLoad()
792 return genVectorLoad(codegen, builder, ptr, args); in genTensorLoad()
797 static void genTensorStore(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genTensorStore() argument
801 if (codegen.redVal) { in genTensorStore()
802 if (codegen.curVecLength > 1) in genTensorStore()
803 rhs = builder.create<arith::SelectOp>(loc, codegen.curVecMask, rhs, in genTensorStore()
804 codegen.redVal); in genTensorStore()
805 updateReduc(merger, codegen, rhs); in genTensorStore()
810 if (t == codegen.sparseOut) { in genTensorStore()
816 genInsertionStore(codegen, builder, op, t, rhs); in genTensorStore()
822 Value ptr = genSubscript(codegen, builder, op, t, args); in genTensorStore()
823 if (codegen.curVecLength > 1) in genTensorStore()
824 genVectorStore(codegen, builder, rhs, ptr, args); in genTensorStore()
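When the store target is the current reduction, genTensorStore does not write memory at all: it folds rhs into codegen.redVal, and under a vector mask it uses arith.select so that lanes outside the mask keep the previous accumulator. Lane-wise, the point of that select is (conceptual sketch, hypothetical names):

  #include <array>
  #include <cstddef>

  constexpr std::size_t VL = 4;

  // Inactive tail lanes must keep the old accumulator value; otherwise the final
  // horizontal reduction would pick up stale data from those lanes.
  std::array<double, VL> updateReduction(std::array<double, VL> redVal,
                                         std::array<double, VL> rhs,
                                         std::array<bool, VL> mask) {
    std::array<double, VL> out{};
    for (std::size_t l = 0; l < VL; ++l)
      out[l] = mask[l] ? rhs[l] : redVal[l];
    return out;
  }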
832 static Value genLoad(CodeGen &codegen, OpBuilder &builder, Location loc, in genLoad() argument
836 if (codegen.curVecLength > 1) { in genLoad()
849 Value vload = genVectorLoad(codegen, builder, ptr, {s}); in genLoad()
853 loc, vectorType(codegen, builder.getI32Type()), vload); in genLoad()
855 !codegen.options.enableSIMDIndex32) in genLoad()
857 loc, vectorType(codegen, builder.getI64Type()), vload); in genLoad()
876 static Value genInvariantValue(Merger &merger, CodeGen &codegen, in genInvariantValue() argument
879 if (codegen.curVecLength > 1) in genInvariantValue()
880 return genVectorInvariantValue(codegen, builder, val); in genInvariantValue()
885 static Value genAddress(CodeGen &codegen, OpBuilder &builder, Location loc, in genAddress() argument
891 mul = genVectorInvariantValue(codegen, builder, inv); in genAddress()
897 static Value genIndexValue(CodeGen &codegen, OpBuilder &builder, unsigned idx, in genIndexValue() argument
899 Value ival = codegen.loops[idx]; in genIndexValue()
904 unsigned vl = codegen.curVecLength; in genIndexValue()
907 VectorType vtp = vectorType(codegen, itype); in genIndexValue()
912 Type stepvty = vectorType(codegen, builder.getI64Type()); in genIndexValue()
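genIndexValue turns the current loop index codegen.loops[idx] into a value usable in the loop body; under vectorization it has to become a whole vector of consecutive indices, which is why a step-vector type over i64 shows up above. Conceptually, lane l holds iv + l (plain C++ sketch, hypothetical names):

  #include <array>
  #include <cstddef>
  #include <cstdint>

  constexpr std::size_t VL = 4;

  // Lane contents of a vectorized linalg.index: broadcast iv plus a step vector.
  std::array<std::int64_t, VL> vectorizedIndex(std::int64_t iv) {
    std::array<std::int64_t, VL> out{};
    for (std::size_t l = 0; l < VL; ++l)
      out[l] = iv + static_cast<std::int64_t>(l); // iv + [0, 1, ..., VL-1]
    return out;
  }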
933 static Value relinkBranch(CodeGen &codegen, RewriterBase &rewriter, in relinkBranch() argument
937 return genIndexValue(codegen, rewriter, indexOp.dim(), ldx); in relinkBranch()
941 i, relinkBranch(codegen, rewriter, block, def->getOperand(i), ldx)); in relinkBranch()
948 static Value genExp(Merger &merger, CodeGen &codegen, RewriterBase &rewriter, in genExp() argument
954 return genTensorLoad(merger, codegen, rewriter, op, exp); in genExp()
956 return genInvariantValue(merger, codegen, rewriter, exp); in genExp()
958 return genIndexValue(codegen, rewriter, merger.exp(exp).index, ldx); in genExp()
960 genExp(merger, codegen, rewriter, op, merger.exp(exp).children.e0, ldx); in genExp()
962 genExp(merger, codegen, rewriter, op, merger.exp(exp).children.e1, ldx); in genExp()
967 ee = relinkBranch(codegen, rewriter, ee.getParentBlock(), ee, ldx); in genExp()
972 static bool isInvariantAffine(const CodeGen &codegen, AffineExpr a, in isInvariantAffine() argument
979 return codegen.loops[idx] != nullptr; // no longer in play? in isInvariantAffine()
984 return isInvariantAffine(codegen, binOp.getLHS(), ldx, atLevel) && in isInvariantAffine()
985 isInvariantAffine(codegen, binOp.getRHS(), ldx, atLevel); in isInvariantAffine()
993 static void genInvariants(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genInvariants() argument
1006 if (!isInvariantAffine(codegen, a, ldx, atLevel)) in genInvariants()
1016 Value load = genTensorLoad(merger, codegen, builder, op, exp); in genInvariants()
1017 codegen.redKind = getReduction(last); in genInvariants()
1018 codegen.redExp = exp; in genInvariants()
1019 updateReduc(merger, codegen, load); in genInvariants()
1021 Value redVal = codegen.redVal; in genInvariants()
1022 updateReduc(merger, codegen, Value()); in genInvariants()
1023 codegen.redExp = -1u; in genInvariants()
1024 codegen.redKind = kNoReduc; in genInvariants()
1025 genTensorStore(merger, codegen, builder, op, exp, redVal); in genInvariants()
1030 atStart ? genTensorLoad(merger, codegen, builder, op, exp) : Value(); in genInvariants()
1040 genInvariants(merger, codegen, builder, op, e0, ldx, atStart, last); in genInvariants()
1041 genInvariants(merger, codegen, builder, op, e1, ldx, atStart, last); in genInvariants()
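genInvariants runs at both ends of each loop sequence: tensor loads whose affine subscripts no longer involve inner loop indices are hoisted and loaded once (atStart), and when the hoisted expression is the reduction target, redKind/redExp/redVal are initialized from the output and the accumulated value is stored back when the sequence ends. In scalar form the effect is the familiar hoisted load/store pair (dense sketch, hypothetical names):

  #include <cstddef>

  // Effect of hoisting the output load/store of a row-wise reduction.
  void rowReduce(const double *a, const double *b, double *x,
                 std::size_t rows, std::size_t cols) {
    for (std::size_t i = 0; i < rows; ++i) {
      double red = x[i];                 // hoisted load starts the reduction (atStart)
      for (std::size_t j = 0; j < cols; ++j)
        red += a[i * cols + j] * b[j];   // updates stay in the scalar, not in memory
      x[i] = red;                        // hoisted store ends the reduction
    }
  }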
1046 static void genExpansion(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genExpansion() argument
1048 OpOperand *lhs = codegen.sparseOut; in genExpansion()
1049 if (!lhs || codegen.outerParNest != op.getRank(lhs) - 1 || in genExpansion()
1050 at != codegen.outerParNest) in genExpansion()
1065 assert(!codegen.expValues); in genExpansion()
1066 codegen.expValues = res.getResult(0); in genExpansion()
1067 codegen.expFilled = res.getResult(1); in genExpansion()
1068 codegen.expAdded = res.getResult(2); in genExpansion()
1069 codegen.expCount = res.getResult(3); in genExpansion()
1071 assert(codegen.expValues); in genExpansion()
1072 builder.create<CompressOp>(loc, tensor, codegen.lexIdx, codegen.expValues, in genExpansion()
1073 codegen.expFilled, codegen.expAdded, in genExpansion()
1074 codegen.expCount); in genExpansion()
1075 codegen.expValues = codegen.expFilled = codegen.expAdded = in genExpansion()
1076 codegen.expCount = Value(); in genExpansion()
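genExpansion brackets the outer loops of a sparse output: at the start it obtains the four scratch buffers (expValues/expFilled/expAdded/expCount), and at the end it emits a CompressOp that moves the gathered entries into the output tensor at the current lexIdx prefix and resets the scratch. A conceptual per-row sketch of that compress step, assuming entries must land in index order (insertIntoOutput is a hypothetical stand-in for appending to the sparse result):

  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <functional>
  #include <vector>

  void compressRow(std::vector<double> &values, std::vector<bool> &filled,
                   std::vector<std::uint64_t> &added, std::uint64_t &count,
                   const std::function<void(std::uint64_t, double)> &insertIntoOutput) {
    // Entries were recorded in visit order; restore column order before insertion.
    std::sort(added.begin(), added.begin() + static_cast<std::ptrdiff_t>(count));
    for (std::uint64_t k = 0; k < count; ++k) {
      std::uint64_t col = added[k];
      insertIntoOutput(col, values[col]); // append (col, value) to the sparse output
      values[col] = 0.0;                  // reset the scratch row for the next use
      filled[col] = false;
    }
    count = 0;
  }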
1083 static bool genInit(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genInit() argument
1099 if (codegen.pidxs[tensor][topSort[pat - 1]]) in genInit()
1102 Value ptr = codegen.pointers[tensor][idx]; in genInit()
1105 : codegen.pidxs[tensor][topSort[pat - 1]]; in genInit()
1106 codegen.pidxs[tensor][idx] = genLoad(codegen, builder, loc, ptr, p0); in genInit()
1108 codegen.highs[tensor][idx] = genLoad(codegen, builder, loc, ptr, p1); in genInit()
1117 codegen.loops[idx] = constantIndex(builder, loc, 0); in genInit()
1124 static bool isVectorFor(CodeGen &codegen, bool isInner, bool isReduction, in isVectorFor() argument
1127 if (codegen.sparseOut && !isReduction) in isVectorFor()
1130 switch (codegen.options.vectorizationStrategy) { in isVectorFor()
1144 static bool isParallelFor(CodeGen &codegen, bool isOuter, bool isReduction, in isParallelFor() argument
1147 if (codegen.sparseOut) in isParallelFor()
1150 switch (codegen.options.parallelizationStrategy) { in isParallelFor()
1191 static Operation *genFor(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genFor() argument
1200 bool isVector = isVectorFor(codegen, isInner, isReduction, isSparse) && in genFor()
1203 isParallelFor(codegen, isOuter, isReduction, isSparse, isVector); in genFor()
1207 codegen.curVecLength = codegen.options.vectorLength; in genFor()
1211 Value lo = isSparse ? codegen.pidxs[tensor][idx] : codegen.loops[idx]; in genFor()
1212 Value hi = isSparse ? codegen.highs[tensor][idx] : codegen.sizes[idx]; in genFor()
1213 Value step = constantIndex(builder, loc, codegen.curVecLength); in genFor()
1214 if (isVector && codegen.options.enableVLAVectorization) { in genFor()
1225 codegen.pidxs[tensor][idx] = parOp.getInductionVars()[0]; in genFor()
1227 codegen.loops[idx] = parOp.getInductionVars()[0]; in genFor()
1234 if (codegen.redVal) { in genFor()
1236 if (isVector && !codegen.redVal.getType().isa<VectorType>()) { in genFor()
1237 VectorType vtp = vectorType(codegen, codegen.redVal.getType()); in genFor()
1238 Value vred = genVectorReducInit(codegen, builder, loc, vtp); in genFor()
1239 updateReduc(merger, codegen, vred); in genFor()
1241 operands.push_back(codegen.redVal); in genFor()
1243 if (codegen.expValues) in genFor()
1244 operands.push_back(codegen.expCount); in genFor()
1246 if (codegen.redVal) in genFor()
1247 updateReduc(merger, codegen, forOp.getRegionIterArgs().front()); in genFor()
1248 if (codegen.expValues) in genFor()
1249 codegen.expCount = forOp.getRegionIterArgs().back(); in genFor()
1253 codegen.pidxs[tensor][idx] = iv; in genFor()
1255 codegen.loops[idx] = iv; in genFor()
1259 codegen.curVecMask = genVectorMask(codegen, builder, iv, lo, hi, step); in genFor()
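genInit loads the loop bounds of a compressed dimension from the pointers array (pidxs from position p0, highs from p1), and genFor turns them into an scf.for, scf.parallel, or vector loop from codegen.pidxs to codegen.highs; genLocals later loads the coordinate from the indices array at the induction value. Put together, the generated scalar loop walks one CSR row in the usual way (conceptual sketch, hypothetical names):

  #include <cstdint>
  #include <vector>

  // Shape of the scalar loop emitted for one compressed dimension (CSR row i).
  double rowDot(const std::vector<std::uint64_t> &pointers,
                const std::vector<std::uint64_t> &indices,
                const std::vector<double> &values,
                const std::vector<double> &b, std::uint64_t i) {
    double red = 0.0;
    std::uint64_t lo = pointers[i];       // codegen.pidxs[tensor][idx]
    std::uint64_t hi = pointers[i + 1];   // codegen.highs[tensor][idx]
    for (std::uint64_t p = lo; p < hi; ++p) {
      std::uint64_t j = indices[p];       // coordinate loaded by genLocals
      red += values[p] * b[j];            // body produced by genExp/genTensorStore
    }
    return red;
  }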
1264 static Operation *genWhile(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genWhile() argument
1276 operands.push_back(codegen.pidxs[tensor][idx]); in genWhile()
1279 if (codegen.redVal) { in genWhile()
1280 types.push_back(codegen.redVal.getType()); in genWhile()
1281 operands.push_back(codegen.redVal); in genWhile()
1283 if (codegen.expValues) { in genWhile()
1285 operands.push_back(codegen.expCount); in genWhile()
1289 operands.push_back(codegen.loops[idx]); in genWhile()
1309 Value op2 = codegen.highs[tensor][idx]; in genWhile()
1313 codegen.pidxs[tensor][idx] = after->getArgument(o++); in genWhile()
1316 if (codegen.redVal) in genWhile()
1317 updateReduc(merger, codegen, after->getArgument(o++)); in genWhile()
1318 if (codegen.expValues) in genWhile()
1319 codegen.expCount = after->getArgument(o++); in genWhile()
1321 codegen.loops[idx] = after->getArgument(o++); in genWhile()
1330 static Operation *genLoop(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genLoop() argument
1337 return genFor(merger, codegen, builder, op, isOuter, isInner, idx, indices); in genLoop()
1339 return genWhile(merger, codegen, builder, op, idx, needsUniv, indices); in genLoop()
1344 static void genLocals(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genLocals() argument
1356 Value ptr = codegen.indices[tensor][idx]; in genLocals()
1357 Value s = codegen.pidxs[tensor][idx]; in genLocals()
1358 Value load = genLoad(codegen, builder, loc, ptr, s); in genLocals()
1359 codegen.idxs[tensor][idx] = load; in genLocals()
1375 codegen.loops[idx] = min; in genLocals()
1388 if (codegen.pidxs[tensor][topSort[pat - 1]]) in genLocals()
1391 : codegen.pidxs[tensor][topSort[pat - 1]]; in genLocals()
1392 codegen.pidxs[tensor][idx] = genAddress( in genLocals()
1393 codegen, builder, loc, codegen.sizes[idx], p, codegen.loops[idx]); in genLocals()
1399 if (codegen.sparseOut && !codegen.expValues) { in genLocals()
1401 builder.create<memref::StoreOp>(loc, codegen.loops[idx], codegen.lexIdx, in genLocals()
1407 static void genWhileInduction(Merger &merger, CodeGen &codegen, in genWhileInduction() argument
1413 if (codegen.redVal || codegen.expValues) { in genWhileInduction()
1418 if (codegen.redVal) { in genWhileInduction()
1419 yields.push_back(codegen.redVal); in genWhileInduction()
1420 updateReduc(merger, codegen, ifOp.getResult(y++)); in genWhileInduction()
1422 if (codegen.expValues) { in genWhileInduction()
1423 yields.push_back(codegen.expCount); in genWhileInduction()
1424 codegen.expCount = ifOp->getResult(y++); in genWhileInduction()
1444 Value op1 = codegen.idxs[tensor][idx]; in genWhileInduction()
1445 Value op2 = codegen.loops[idx]; in genWhileInduction()
1446 Value op3 = codegen.pidxs[tensor][idx]; in genWhileInduction()
1451 codegen.pidxs[tensor][idx] = whileOp->getResult(o++); in genWhileInduction()
1454 if (codegen.redVal) { in genWhileInduction()
1455 operands.push_back(codegen.redVal); in genWhileInduction()
1456 updateReduc(merger, codegen, whileOp->getResult(o++)); in genWhileInduction()
1458 if (codegen.expValues) { in genWhileInduction()
1459 operands.push_back(codegen.expCount); in genWhileInduction()
1460 codegen.expCount = whileOp->getResult(o++); in genWhileInduction()
1464 builder.create<arith::AddIOp>(loc, codegen.loops[idx], one)); in genWhileInduction()
1465 codegen.loops[idx] = whileOp->getResult(o++); in genWhileInduction()
1473 static void genForInduction(Merger &merger, CodeGen &codegen, in genForInduction() argument
1479 if (codegen.redVal) { in genForInduction()
1480 operands.push_back(codegen.redVal); in genForInduction()
1481 updateReduc(merger, codegen, loop->getResult(o++)); in genForInduction()
1483 if (codegen.expValues) { in genForInduction()
1484 operands.push_back(codegen.expCount); in genForInduction()
1485 codegen.expCount = loop->getResult(o++); in genForInduction()
1494 static scf::IfOp genIf(Merger &merger, CodeGen &codegen, OpBuilder &builder, in genIf() argument
1506 Value op1 = codegen.idxs[tensor][idx]; in genIf()
1507 Value op2 = codegen.loops[idx]; in genIf()
1516 if (codegen.redVal) in genIf()
1517 types.push_back(codegen.redVal.getType()); in genIf()
1518 if (codegen.expValues) in genIf()
1526 static void endIf(Merger &merger, CodeGen &codegen, OpBuilder &builder, in endIf() argument
1530 if (codegen.redVal) { in endIf()
1531 operands.push_back(codegen.redVal); in endIf()
1532 updateReduc(merger, codegen, redInput); in endIf()
1534 if (codegen.expValues) { in endIf()
1535 operands.push_back(codegen.expCount); in endIf()
1536 codegen.expCount = cntInput; in endIf()
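When several sparse operands iterate over the same index, genWhile builds an scf.while whose condition holds while every participating position is below its high bound; genLocals loads each operand's coordinate and takes the minimum as the current index; genIf guards the lattice-point cases where only some operands are present at that index; and genWhileInduction advances exactly the positions whose coordinate matched. For sparse vector addition the generated control flow is essentially the following (conceptual sketch, hypothetical names):

  #include <algorithm>
  #include <cstdint>
  #include <vector>

  // Co-iteration skeleton for c = a + b with both operands sparse.
  void sparseAdd(const std::vector<std::uint64_t> &ia, const std::vector<double> &va,
                 const std::vector<std::uint64_t> &ib, const std::vector<double> &vb,
                 std::vector<std::uint64_t> &ic, std::vector<double> &vc) {
    std::uint64_t pa = 0, pb = 0;                    // codegen.pidxs
    while (pa < ia.size() && pb < ib.size()) {       // genWhile condition
      std::uint64_t i = std::min(ia[pa], ib[pb]);    // genLocals: current index
      bool inA = (ia[pa] == i), inB = (ib[pb] == i);
      double v;
      if (inA && inB)                                // genIf: both operands present
        v = va[pa] + vb[pb];
      else if (inA)                                  // only a present
        v = va[pa];
      else                                           // only b present
        v = vb[pb];
      ic.push_back(i);
      vc.push_back(v);
      pa += inA ? 1 : 0;                             // genWhileInduction: conditional advance
      pb += inB ? 1 : 0;
    }
    for (; pa < ia.size(); ++pa) { ic.push_back(ia[pa]); vc.push_back(va[pa]); } // tails
    for (; pb < ib.size(); ++pb) { ic.push_back(ib[pb]); vc.push_back(vb[pb]); }
  }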
1549 static bool startLoopSeq(Merger &merger, CodeGen &codegen, OpBuilder &builder, in startLoopSeq() argument
1553 assert(codegen.curVecLength == 1); in startLoopSeq()
1554 assert(!codegen.loops[idx]); in startLoopSeq()
1556 genInvariants(merger, codegen, builder, op, exp, ldx, /*atStart=*/true); in startLoopSeq()
1558 genExpansion(merger, codegen, builder, op, at, /*atStart=*/true); in startLoopSeq()
1562 genInit(merger, codegen, builder, op, topSort, at, merger.lat(l0).bits); in startLoopSeq()
1577 static Operation *startLoop(Merger &merger, CodeGen &codegen, in startLoop() argument
1581 assert(codegen.curVecLength == 1); in startLoop()
1583 Operation *loop = genLoop(merger, codegen, builder, op, topSort, at, in startLoop()
1586 genLocals(merger, codegen, builder, op, topSort, at, needsUniv, in startLoop()
1592 static bool endLoop(Merger &merger, CodeGen &codegen, OpBuilder &builder, in endLoop() argument
1595 codegen.curVecLength = 1; in endLoop()
1598 genWhileInduction(merger, codegen, builder, op, idx, needsUniv, in endLoop()
1603 genForInduction(merger, codegen, builder, op, loop); in endLoop()
1608 static void endLoopSeq(Merger &merger, CodeGen &codegen, OpBuilder &builder, in endLoopSeq() argument
1611 assert(codegen.curVecLength == 1); in endLoopSeq()
1612 codegen.loops[idx] = Value(); in endLoopSeq()
1614 if (codegen.redVal) in endLoopSeq()
1615 if (auto vtp = codegen.redVal.getType().dyn_cast<VectorType>()) in endLoopSeq()
1616 updateReduc(merger, codegen, in endLoopSeq()
1617 genVectorReducEnd(codegen, builder, op.getLoc(), vtp)); in endLoopSeq()
1619 genInvariants(merger, codegen, builder, op, exp, ldx, /*atStart=*/false); in endLoopSeq()
1621 genExpansion(merger, codegen, builder, op, at, /*atStart=*/false); in endLoopSeq()
1627 static void genStmt(Merger &merger, CodeGen &codegen, RewriterBase &rewriter, in genStmt() argument
1633 Value rhs = genExp(merger, codegen, rewriter, op, exp, ldx); in genStmt()
1634 genTensorStore(merger, codegen, rewriter, op, exp, rhs); in genStmt()
1644 bool needsUniv = startLoopSeq(merger, codegen, rewriter, op, topSort, exp, at, in genStmt()
1653 startLoop(merger, codegen, rewriter, op, topSort, at, li, needsUniv); in genStmt()
1657 Value redInput = codegen.redVal; in genStmt()
1658 Value cntInput = codegen.expCount; in genStmt()
1667 genIf(merger, codegen, rewriter, op, idx, merger.lat(lj).simple); in genStmt()
1668 genStmt(merger, codegen, rewriter, op, topSort, ej, at + 1); in genStmt()
1669 endIf(merger, codegen, rewriter, op, ifOp, loop, redInput, cntInput); in genStmt()
1671 genStmt(merger, codegen, rewriter, op, topSort, ej, at + 1); in genStmt()
1678 endLoop(merger, codegen, rewriter, op, loop, idx, li, needsUniv); in genStmt()
1682 endLoopSeq(merger, codegen, rewriter, op, exp, at, idx, ldx); in genStmt()
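genStmt is the recursive driver visible in the lines above: per level of the topologically sorted loop order it opens a loop sequence, emits one loop per lattice point, optionally guards each sub-case with genIf, recurses one level deeper, and closes everything again. A highly simplified structural sketch of that recursion (stand-in types; the real code walks Merger lattice points and carries much more state):

  #include <cstddef>
  #include <vector>

  struct LatticeCase { bool needsGuard; };
  using LoopLevel = std::vector<LatticeCase>;

  void emitLevel(const std::vector<LoopLevel> &levels, std::size_t at) {
    if (at == levels.size())
      return;                     // innermost: genExp + genTensorStore emit the body
    // startLoopSeq: genInvariants(atStart=true), genExpansion(atStart=true), genInit
    for (const LatticeCase &lc : levels[at]) {
      // startLoop: genLoop (scf.for or scf.while) + genLocals for this lattice point
      if (lc.needsGuard) {
        // genIf guards the case before descending
        emitLevel(levels, at + 1);
        // endIf restores reduction/expansion state
      } else {
        emitLevel(levels, at + 1);
      }
      // endLoop: genWhileInduction or genForInduction
    }
    // endLoopSeq: finalize vector reductions, genInvariants/genExpansion(atStart=false)
  }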
1686 static void genResult(Merger &merger, CodeGen &codegen, RewriterBase &rewriter, in genResult() argument
1694 codegen.sparseOut == lhs); in genResult()
1698 Value val = codegen.buffers.back(); // value array in genResult()
1754 CodeGen codegen(options, numTensors, numLoops, sparseOut, outerParNest); in matchAndRewrite() local
1755 genBuffers(merger, codegen, rewriter, op); in matchAndRewrite()
1756 genStmt(merger, codegen, rewriter, op, topSort, exp, 0); in matchAndRewrite()
1757 genResult(merger, codegen, rewriter, op); in matchAndRewrite()