Lines Matching defs:linalgOp

191 LogicalResult initState(RewriterBase &rewriter, LinalgOp linalgOp,
229 maskOperation(RewriterBase &rewriter, Operation *opToMask, LinalgOp linalgOp,
235 void initIterSpaceStaticSizes(LinalgOp linalgOp) {
236 iterSpaceStaticSizes.append(linalgOp.getStaticLoopRanges());
243 LinalgOp linalgOp);
250 LinalgOp linalgOp,
306 LinalgOp linalgOp) {
312 linalgOp.getLoc(), iterSpaceStaticSizes[vecDim]));
320 if (failed(linalgOp.mapIterationSpaceDimToOperandDim(vecDim, operand,
324 Value dynamicDim = linalgOp.hasPureTensorSemantics()
326 linalgOp.getLoc(), operand, operandDimPos)
328 linalgOp.getLoc(), operand, operandDimPos);
339 VectorizationState::initState(RewriterBase &rewriter, LinalgOp linalgOp,
343 rewriter.setInsertionPoint(linalgOp);
356 canonicalVecShape = linalgOp.getStaticLoopRanges();
357 scalableVecDims.append(linalgOp.getNumLoops(), false);
371 initIterSpaceStaticSizes(linalgOp);
376 if (failed(precomputeIterSpaceValueSizes(rewriter, linalgOp)))
387 RewriterBase &rewriter, Operation *opToMask, LinalgOp linalgOp,
407 linalgOp.getNumLoops(), rewriter.getContext());
448 Value mask = rewriter.create<vector::CreateMaskOp>(linalgOp.getLoc(),
457 LinalgOp linalgOp,
467 getOrCreateMaskFor(rewriter, opToMask, linalgOp, maybeMaskingMap);
576 auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
578 outputOperand->getOperandNumber() - linalgOp.getNumDpsInputs();
581 if (!matchReduction(linalgOp.getRegionOutputArgs(), outputPos, combinerOps) ||
617 static SmallVector<bool> getDimsToReduce(LinalgOp linalgOp) {
619 llvm::map_range(linalgOp.getIteratorTypesArray(), isReductionIterator));
640 auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
641 AffineMap opOperandMap = linalgOp.getMatchingIndexingMap(outputOperand);
658 SmallVector<Value> indices(linalgOp.getRank(outputOperand),
673 write = state.maskOperation(rewriter, write, linalgOp, opOperandMap);
701 /// Helper function to vectorize the terminator of a `linalgOp`. New result
711 LinalgOp linalgOp, SmallVectorImpl<Value> &newResults) {
721 linalgOp.getDpsInitOperand(output.index()), state);
729 /// Helper function to vectorize the index operations of a `linalgOp`. Return
736 LinalgOp linalgOp) {
761 AffineMap::getPermutationMap(permPattern, linalgOp.getContext());
767 llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
844 /// Find the index of the trailing non-unit dim in linalgOp. This hook is used
859 static uint64_t getTrailingNonUnitLoopDimIdx(LinalgOp linalgOp) {
860 SmallVector<int64_t> loopRanges = linalgOp.getStaticLoopRanges();
862 (linalgOp.hasDynamicShape() ||
877 static bool isLoopInvariantIdx(LinalgOp &linalgOp, Value &val,
888 auto *block = linalgOp.getBlock();
900 return linalgOp.getStaticLoopRanges()[indexOp.getDim()] == 1;
905 // Values defined outside `linalgOp` are loop invariant.
909 // Values defined inside `linalgOp`, which are constant, are loop invariant.
915 result &= isLoopInvariantIdx(linalgOp, op, resType);
937 static bool isContiguousLoadIdx(LinalgOp &linalgOp, Value &val,
948 auto *block = linalgOp.getBlock();
957 auto loopDimThatIncrementsByOne = getTrailingNonUnitLoopDimIdx(linalgOp);
975 result |= isContiguousLoadIdx(linalgOp, op, foundIndexOp, resType);
994 LinalgOp &linalgOp, VectorType resType) {
1024 leadingIdxsLoopInvariant &= isLoopInvariantIdx(linalgOp, indexVal, resType);
1041 isLoopInvariantIdx(linalgOp, extractOpTrailingIdx, resType)) {
1052 bool isContiguousLoad = isContiguousLoadIdx(linalgOp, extractOpTrailingIdx,
1075 Operation *op, LinalgOp linalgOp, const IRMapping &bvm) {
1097 getTensorExtractMemoryAccessPattern(extractOp, linalgOp, resultType);
1107 gatherOp = state.maskOperation(rewriter, gatherOp, linalgOp);
1207 static Operation *reduceIfNeeded(OpBuilder &b, LinalgOp linalgOp, Operation *op,
1219 SmallVector<bool> dimsToMask = getDimsToReduce(linalgOp);
1244 LinalgOp linalgOp, Operation *op, const IRMapping &bvm,
1259 // Clone so that the constant is not confined to the linalgOp block.
1271 if (!blockArg || blockArg.getOwner() != linalgOp.getBlock() ||
1272 blockArg.getArgNumber() < linalgOp.getNumDpsInputs())
1276 linalgOp.getRegionOutputArgs(),
1277 blockArg.getArgNumber() - linalgOp.getNumDpsInputs(), reductionOps);
1285 reduceIfNeeded(rewriter, linalgOp, op, reductionOperands[0].first,
1334 /// Generic vectorization function that rewrites the body of a `linalgOp` into
1336 /// 1. Verify the `linalgOp` has one non-empty region.
1348 /// performed to the maximal common vector size implied by the `linalgOp`
1358 LinalgOp linalgOp,
1361 Block *block = linalgOp.getBlock();
1367 mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet);
1370 if (linalgOp.getNumDpsInits() == 0)
1374 Location loc = linalgOp.getLoc();
1376 for (OpOperand *opOperand : linalgOp.getOpOperandsMatchingBBargs()) {
1377 BlockArgument bbarg = linalgOp.getMatchingBlockArgument(opOperand);
1378 if (linalgOp.isScalar(opOperand)) {
1385 AffineMap indexingMap = linalgOp.getMatchingIndexingMap(opOperand);
1390 if (linalgOp.isDpsInput(opOperand)) {
1403 SmallVector<Value> indices(linalgOp.getShape(opOperand).size(), zero);
1407 read = state.maskOperation(rewriter, read, linalgOp, indexingMap);
1434 return vectorizeLinalgYield(rewriter, op, bvm, state, linalgOp, newResults);
1441 return vectorizeLinalgIndex(rewriter, state, op, linalgOp);
1448 return vectorizeTensorExtract(rewriter, state, op, linalgOp, bvm);
1455 vectorizeOneOp(rewriter, state, linalgOp, &op, bvm, hooks);
1462 state.maskOperation(rewriter, result.newOp, linalgOp);
1878 LinalgOp linalgOp, ArrayRef<int64_t> inputVectorSizes,
1881 if (llvm::is_contained(linalgOp.getStaticShape(), 0))
1885 failed(vector::isValidMaskedInputVector(linalgOp.getStaticLoopRanges(),
1889 if (linalgOp.hasDynamicShape() && failed(vectorizeDynamicLinalgOpPrecondition(
1890 linalgOp, flatten1DDepthwiseConv))) {
1901 for (Operation &innerOp : linalgOp->getRegion(0).front()) {
1922 if (isElementwise(linalgOp))
1928 if (isa<ConvolutionOpInterface>(linalgOp.getOperation()))
1933 if (!allIndexingsAreProjectedPermutation(linalgOp)) {
1937 if (failed(reductionPreconditions(linalgOp))) {
2017 auto linalgOp = dyn_cast<LinalgOp>(op);
2021 if (!linalgOp)
2046 auto iterators = linalgOp.getIteratorTypesArray();
2109 if (linalgOp.hasUserDefinedMaps())
2114 return success(isElementwise(linalgOp) || isa<linalg::MatmulOp>(op) ||
2117 isa<linalg::MatvecOp>(op) || hasReductionIterator(linalgOp));
2133 .Case<linalg::LinalgOp>([&](auto linalgOp) {
2134 return vectorizeLinalgOpPrecondition(linalgOp, inputVectorSizes,
2151 static void convertAffineApply(RewriterBase &rewriter, LinalgOp linalgOp) {
2153 auto toReplace = linalgOp.getBlock()->getOps<affine::AffineApplyOp>();
2198 if (auto linalgOp = dyn_cast<linalg::LinalgOp>(op)) {
2199 if (failed(state.initState(rewriter, linalgOp, inputVectorSizes,
2209 .Case<linalg::LinalgOp>([&](auto linalgOp) {
2213 if (isa<ConvolutionOpInterface>(linalgOp.getOperation())) {
2215 rewriter, linalgOp, inputVectorSizes, inputScalableVecDims,
2230 convertAffineApply(rewriter, linalgOp);
2237 return vectorizeAsLinalgGeneric(rewriter, state, linalgOp, results);
3053 Conv1DGenerator(RewriterBase &rewriter, LinalgOp linalgOp, int strideW,
3055 : StructuredGenerator<LinalgOp, utils::IteratorType>(rewriter, linalgOp),
3057 // Determine whether `linalgOp` can be generated with this generator
3058 if (linalgOp.getNumDpsInputs() != 2 || linalgOp.getNumDpsInits() != 1)
3060 lhsShaped = linalgOp.getDpsInputOperand(0)->get();
3061 rhsShaped = linalgOp.getDpsInputOperand(1)->get();
3062 resShaped = linalgOp.getDpsInitOperand(0)->get();
3074 Operation *reduceOp = matchLinalgReduction(linalgOp.getDpsInitOperand(0));
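
The matches above repeatedly exercise the same LinalgOp interface queries: static loop ranges and loop count (initState, initIterSpaceStaticSizes), DPS input/init operand accessors (the Conv1DGenerator constructor), and matching indexing maps. The sketch below is illustrative only and is not part of the indexed file; it assumes the upstream MLIR Linalg headers, and the helper name summarizeLinalgOp is hypothetical.

// Minimal sketch of the LinalgOp interface queries that recur in the
// matches listed above; the helper name is hypothetical.
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;

static void summarizeLinalgOp(linalg::LinalgOp linalgOp) {
  // Loop structure, as consumed by initState / initIterSpaceStaticSizes.
  SmallVector<int64_t> loopRanges = linalgOp.getStaticLoopRanges();
  unsigned numLoops = linalgOp.getNumLoops();

  // DPS operand counts, as checked by the Conv1DGenerator constructor.
  int64_t numInputs = linalgOp.getNumDpsInputs();
  int64_t numInits = linalgOp.getNumDpsInits();

  llvm::outs() << "linalg op at " << linalgOp.getLoc() << ": " << numLoops
               << " loops, " << numInputs << " inputs, " << numInits
               << " inits, dynamic shape: " << linalgOp.hasDynamicShape()
               << "\n";
  for (int64_t range : loopRanges)
    llvm::outs() << "  static loop range: " << range << "\n";

  // Indexing map of the first init operand, as used when vectorizing writes.
  if (numInits > 0) {
    OpOperand *init = linalgOp.getDpsInitOperand(0);
    AffineMap map = linalgOp.getMatchingIndexingMap(init);
    llvm::outs() << "  init #0 indexing map: " << map << "\n";
  }
}

Such a helper could be called from a pass or rewrite pattern wherever a LinalgOp has already been matched, mirroring how the listed definitions thread the same interface object through their precondition checks and vectorization steps.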