//===- SparseBufferRewriting.cpp - Sparse buffer rewriting rules ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements rewriting rules that are specific to sparse tensor
// primitives with memref operands.
//
//===----------------------------------------------------------------------===//

#include "CodegenUtils.h"

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Support/LLVM.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

//===---------------------------------------------------------------------===//
// Helper methods for the actual rewriting rules.
//===---------------------------------------------------------------------===//

static constexpr uint64_t loIdx = 0;
static constexpr uint64_t hiIdx = 1;
static constexpr uint64_t xStartIdx = 2;

static constexpr const char kLessThanFuncNamePrefix[] = "_sparse_less_than_";
static constexpr const char kCompareEqFuncNamePrefix[] = "_sparse_compare_eq_";
static constexpr const char kPartitionFuncNamePrefix[] = "_sparse_partition_";
static constexpr const char kBinarySearchFuncNamePrefix[] =
    "_sparse_binary_search_";
static constexpr const char kSortNonstableFuncNamePrefix[] =
    "_sparse_sort_nonstable_";
static constexpr const char kSortStableFuncNamePrefix[] =
    "_sparse_sort_stable_";

using FuncGeneratorType = function_ref<void(OpBuilder &, ModuleOp, func::FuncOp,
                                            uint64_t, uint64_t, bool)>;

/// Constructs a function name with this format to facilitate quick sort:
///   <namePrefix><nx>_<x type>_<y0 type>..._<yn type> for sort
///   <namePrefix><nx>_<x type>_coo_<ny>_<y0 type>..._<yn type> for sort_coo
static void getMangledSortHelperFuncName(llvm::raw_svector_ostream &nameOstream,
                                         StringRef namePrefix, uint64_t nx,
                                         uint64_t ny, bool isCoo,
                                         ValueRange operands) {
  nameOstream << namePrefix << nx << "_"
              << getMemRefType(operands[xStartIdx]).getElementType();

  if (isCoo)
    nameOstream << "_coo_" << ny;

  uint64_t yBufferOffset = isCoo ? 1 : nx;
  for (Value v : operands.drop_front(xStartIdx + yBufferOffset))
    nameOstream << "_" << getMemRefType(v).getElementType();
}
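
// For illustration (assuming index x-buffers and an f32 y-buffer), the helpers
// generated below would receive names along the lines of
//   _sparse_sort_nonstable_2_index_f32        (sort, nx = 2, one y buffer)
//   _sparse_sort_nonstable_2_index_coo_1_f32  (sort_coo, nx = 2, ny = 1, one y)
// where the element types are spelled the way MLIR prints them.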

/// Looks up a function that is appropriate for the given operands being
/// sorted, and creates such a function if it doesn't exist yet. The
/// parameters `nx` and `ny` give the number of x and y values provided
/// by the buffers starting at xStartIdx, and `isCoo` indicates whether the
/// operation being processed is a sparse_tensor.sort or sparse_tensor.sort_coo.
static FlatSymbolRefAttr
getMangledSortHelperFunc(OpBuilder &builder, func::FuncOp insertPoint,
                         TypeRange resultTypes, StringRef namePrefix,
                         uint64_t nx, uint64_t ny, bool isCoo,
                         ValueRange operands, FuncGeneratorType createFunc) {
  SmallString<32> nameBuffer;
  llvm::raw_svector_ostream nameOstream(nameBuffer);
  getMangledSortHelperFuncName(nameOstream, namePrefix, nx, ny, isCoo,
                               operands);

  ModuleOp module = insertPoint->getParentOfType<ModuleOp>();
  MLIRContext *context = module.getContext();
  auto result = SymbolRefAttr::get(context, nameOstream.str());
  auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());

  if (!func) {
    // Create the function.
    OpBuilder::InsertionGuard insertionGuard(builder);
    builder.setInsertionPoint(insertPoint);
    Location loc = insertPoint.getLoc();
    func = builder.create<func::FuncOp>(
        loc, nameOstream.str(),
        FunctionType::get(context, operands.getTypes(), resultTypes));
    func.setPrivate();
    createFunc(builder, module, func, nx, ny, isCoo);
  }

  return result;
}

/// Creates a code block to process each pair of (xs[i], xs[j]) for sorting.
/// The code to process the value pairs is generated by `bodyBuilder`.
static void forEachIJPairInXs(
    OpBuilder &builder, Location loc, ValueRange args, uint64_t nx, uint64_t ny,
    bool isCoo, function_ref<void(uint64_t, Value, Value, Value)> bodyBuilder) {
  Value iOffset, jOffset;
  if (isCoo) {
    Value cstep = constantIndex(builder, loc, nx + ny);
    iOffset = builder.create<arith::MulIOp>(loc, args[0], cstep);
    jOffset = builder.create<arith::MulIOp>(loc, args[1], cstep);
  }
  for (uint64_t k = 0; k < nx; k++) {
    scf::IfOp ifOp;
    Value i, j, buffer;
    if (isCoo) {
      Value ck = constantIndex(builder, loc, k);
      i = builder.create<arith::AddIOp>(loc, ck, iOffset);
      j = builder.create<arith::AddIOp>(loc, ck, jOffset);
      buffer = args[xStartIdx];
    } else {
      i = args[0];
      j = args[1];
      buffer = args[xStartIdx + k];
    }
    bodyBuilder(k, i, j, buffer);
  }
}

/// Creates a code block to process each pair of (xys[i], xys[j]) for sorting.
/// The code to process the value pairs is generated by `bodyBuilder`.
static void forEachIJPairInAllBuffers(
    OpBuilder &builder, Location loc, ValueRange args, uint64_t nx, uint64_t ny,
    bool isCoo, function_ref<void(uint64_t, Value, Value, Value)> bodyBuilder) {

  // Create code for the first (nx + ny) buffers. When isCoo==true, these
  // logical buffers are all backed by the single xy buffer of the sort_coo
  // operator.
  forEachIJPairInXs(builder, loc, args, nx + ny, 0, isCoo, bodyBuilder);

  uint64_t numHandledBuffers = isCoo ? 1 : nx + ny;

  // Create code for the remaining buffers.
  Value i = args[0];
  Value j = args[1];
  for (const auto &arg :
       llvm::enumerate(args.drop_front(xStartIdx + numHandledBuffers))) {
    bodyBuilder(arg.index() + nx + ny, i, j, arg.value());
  }
}
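
// For example (illustrative): with isCoo == true, nx == 2, and ny == 1, the xy
// buffer stores three values per logical element, so the k-th field of logical
// element i lives at xy[i * 3 + k]; the loops above compute exactly these
// flattened offsets.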

/// Creates a code block for swapping the values in index i and j for all the
/// buffers.
//
// The generated IR corresponds to this C-like algorithm:
//   swap(x0[i], x0[j]);
//   swap(x1[i], x1[j]);
//   ...
//   swap(xn[i], xn[j]);
//   swap(y0[i], y0[j]);
//   ...
//   swap(yn[i], yn[j]);
static void createSwap(OpBuilder &builder, Location loc, ValueRange args,
                       uint64_t nx, uint64_t ny, bool isCoo) {
  auto swapOnePair = [&](uint64_t unused, Value i, Value j, Value buffer) {
    Value vi = builder.create<memref::LoadOp>(loc, buffer, i);
    Value vj = builder.create<memref::LoadOp>(loc, buffer, j);
    builder.create<memref::StoreOp>(loc, vj, buffer, i);
    builder.create<memref::StoreOp>(loc, vi, buffer, j);
  };

  forEachIJPairInAllBuffers(builder, loc, args, nx, ny, isCoo, swapOnePair);
}

/// Creates a function to compare all the (xs[i], xs[j]) pairs. The method to
/// compare each pair is created via `compareBuilder`.
static void createCompareFuncImplementation(
    OpBuilder &builder, ModuleOp unused, func::FuncOp func, uint64_t nx,
    uint64_t ny, bool isCoo,
    function_ref<scf::IfOp(OpBuilder &, Location, Value, Value, Value, bool)>
        compareBuilder) {
  OpBuilder::InsertionGuard insertionGuard(builder);

  Block *entryBlock = func.addEntryBlock();
  builder.setInsertionPointToStart(entryBlock);
  Location loc = func.getLoc();
  ValueRange args = entryBlock->getArguments();

  scf::IfOp topIfOp;
  auto bodyBuilder = [&](uint64_t k, Value i, Value j, Value buffer) {
    scf::IfOp ifOp = compareBuilder(builder, loc, i, j, buffer, (k == nx - 1));
    if (k == 0) {
      topIfOp = ifOp;
    } else {
      OpBuilder::InsertionGuard insertionGuard(builder);
      builder.setInsertionPointAfter(ifOp);
      builder.create<scf::YieldOp>(loc, ifOp.getResult(0));
    }
  };

  forEachIJPairInXs(builder, loc, args, nx, ny, isCoo, bodyBuilder);

  builder.setInsertionPointAfter(topIfOp);
  builder.create<func::ReturnOp>(loc, topIfOp.getResult(0));
}

/// Generates an if-statement to compare whether x[i] is equal to x[j].
static scf::IfOp createEqCompare(OpBuilder &builder, Location loc, Value i,
                                 Value j, Value x, bool isLastDim) {
  Value f = constantI1(builder, loc, false);
  Value t = constantI1(builder, loc, true);
  Value vi = builder.create<memref::LoadOp>(loc, x, i);
  Value vj = builder.create<memref::LoadOp>(loc, x, j);

  Value cond =
      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, vi, vj);
  scf::IfOp ifOp =
      builder.create<scf::IfOp>(loc, f.getType(), cond, /*else=*/true);

  // x[i] != x[j]:
  builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
  builder.create<scf::YieldOp>(loc, f);

  // x[i] == x[j]:
  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
  if (isLastDim) {
    // Finish checking all dimensions.
    builder.create<scf::YieldOp>(loc, t);
  }

  return ifOp;
}

/// Creates a function to compare whether xs[i] is equal to xs[j].
//
// The generated IR corresponds to this C-like algorithm:
//   if (x0[i] != x0[j])
//     return false;
//   else
//     if (x1[i] != x1[j])
//       return false;
//     else if (x2[i] != x2[j])
//       and so on ...
static void createEqCompareFunc(OpBuilder &builder, ModuleOp unused,
                                func::FuncOp func, uint64_t nx, uint64_t ny,
                                bool isCoo) {
  createCompareFuncImplementation(builder, unused, func, nx, ny, isCoo,
                                  createEqCompare);
}
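
// Note (illustrative): the comparison helpers built this way take the two
// indices followed by the buffers being compared and return a single i1, e.g.
// a signature of roughly (index, index, memref<?xindex>, memref<?xindex>) -> i1
// for a plain sort with nx == 2.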

/// Generates an if-statement to compare whether x[i] is less than x[j].
static scf::IfOp createLessThanCompare(OpBuilder &builder, Location loc,
                                       Value i, Value j, Value x,
                                       bool isLastDim) {
  Value f = constantI1(builder, loc, false);
  Value t = constantI1(builder, loc, true);
  Value vi = builder.create<memref::LoadOp>(loc, x, i);
  Value vj = builder.create<memref::LoadOp>(loc, x, j);

  Value cond =
      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, vi, vj);
  scf::IfOp ifOp =
      builder.create<scf::IfOp>(loc, f.getType(), cond, /*else=*/true);
  // If (x[i] < x[j]).
  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
  builder.create<scf::YieldOp>(loc, t);

  builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
  if (isLastDim) {
    // Finish checking all dimensions.
    builder.create<scf::YieldOp>(loc, f);
  } else {
    cond =
        builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, vj, vi);
    scf::IfOp ifOp2 =
        builder.create<scf::IfOp>(loc, f.getType(), cond, /*else=*/true);
    // Otherwise if (x[j] < x[i]).
    builder.setInsertionPointToStart(&ifOp2.getThenRegion().front());
    builder.create<scf::YieldOp>(loc, f);

    // Otherwise check the remaining dimensions.
    builder.setInsertionPointAfter(ifOp2);
    builder.create<scf::YieldOp>(loc, ifOp2.getResult(0));
    // Set up the insertion point for the nested if-stmt that checks the
    // remaining dimensions.
    builder.setInsertionPointToStart(&ifOp2.getElseRegion().front());
  }

  return ifOp;
}

/// Creates a function to compare whether xs[i] is less than xs[j].
//
// The generated IR corresponds to this C-like algorithm:
//   if (x0[i] < x0[j])
//     return true;
//   else if (x0[j] < x0[i])
//     return false;
//   else
//     if (x1[i] < x1[j])
//       return true;
//     else if (x1[j] < x1[i])
//       and so on ...
static void createLessThanFunc(OpBuilder &builder, ModuleOp unused,
                               func::FuncOp func, uint64_t nx, uint64_t ny,
                               bool isCoo) {
  createCompareFuncImplementation(builder, unused, func, nx, ny, isCoo,
                                  createLessThanCompare);
}

/// Creates a function that uses a binary search to find the insertion point
/// for inserting xs[hi] into the sorted values xs[lo..hi).
//
// The generated IR corresponds to this C-like algorithm:
//   p = hi
//   while (lo < hi)
//     mid = (lo + hi) >> 1
//     if (xs[p] < xs[mid])
//       hi = mid
//     else
//       lo = mid + 1
//   return lo;
//
static void createBinarySearchFunc(OpBuilder &builder, ModuleOp module,
                                   func::FuncOp func, uint64_t nx, uint64_t ny,
                                   bool isCoo) {
  OpBuilder::InsertionGuard insertionGuard(builder);
  Block *entryBlock = func.addEntryBlock();
  builder.setInsertionPointToStart(entryBlock);

  Location loc = func.getLoc();
  ValueRange args = entryBlock->getArguments();
  Value p = args[hiIdx];
  SmallVector<Type, 2> types(2, p.getType()); // only two
  scf::WhileOp whileOp = builder.create<scf::WhileOp>(
      loc, types, SmallVector<Value, 2>{args[loIdx], args[hiIdx]});

  // The before-region of the WhileOp.
  Block *before =
      builder.createBlock(&whileOp.getBefore(), {}, types, {loc, loc});
  builder.setInsertionPointToEnd(before);
  Value cond1 = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
                                              before->getArgument(0),
                                              before->getArgument(1));
  builder.create<scf::ConditionOp>(loc, cond1, before->getArguments());

  // The after-region of the WhileOp.
  Block *after =
      builder.createBlock(&whileOp.getAfter(), {}, types, {loc, loc});
  builder.setInsertionPointToEnd(after);
  Value lo = after->getArgument(0);
  Value hi = after->getArgument(1);
  // Compute mid = (lo + hi) >> 1.
  Value c1 = constantIndex(builder, loc, 1);
  Value mid = builder.create<arith::ShRUIOp>(
      loc, builder.create<arith::AddIOp>(loc, lo, hi), c1);
  Value midp1 = builder.create<arith::AddIOp>(loc, mid, c1);

  // Compare xs[p] < xs[mid].
  SmallVector<Value> compareOperands{p, mid};
  uint64_t numXBuffers = isCoo ? 1 : nx;
  compareOperands.append(args.begin() + xStartIdx,
                         args.begin() + xStartIdx + numXBuffers);
  Type i1Type = IntegerType::get(module.getContext(), 1, IntegerType::Signless);
  FlatSymbolRefAttr lessThanFunc = getMangledSortHelperFunc(
      builder, func, {i1Type}, kLessThanFuncNamePrefix, nx, ny, isCoo,
      compareOperands, createLessThanFunc);
  Value cond2 = builder
                    .create<func::CallOp>(loc, lessThanFunc, TypeRange{i1Type},
                                          compareOperands)
                    .getResult(0);

  // Update lo and hi for the WhileOp as follows:
  //   if (xs[p] < xs[mid])
  //     hi = mid;
  //   else
  //     lo = mid + 1;
  Value newLo = builder.create<arith::SelectOp>(loc, cond2, lo, midp1);
  Value newHi = builder.create<arith::SelectOp>(loc, cond2, mid, hi);
  builder.create<scf::YieldOp>(loc, ValueRange{newLo, newHi});

  builder.setInsertionPointAfter(whileOp);
  builder.create<func::ReturnOp>(loc, whileOp.getResult(0));
}
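
// A small worked example (illustrative): with xs[lo..hi) = [10, 20, 30, 40]
// and xs[p] = xs[hi] = 25, the loop narrows (lo, hi) as (0, 4) -> (0, 2) ->
// (2, 2) and returns 2, the position where 25 keeps the run sorted. Because
// equal keys take the else branch, the returned position is after any run of
// equal values, which is what the stable sort below relies on.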

/// Creates code to advance i in a loop based on xs[p] as follows:
///   while (xs[i] < xs[p]) i += step (step > 0)
/// or
///   while (xs[i] > xs[p]) i += step (step < 0)
/// The routine returns i as well as a boolean value to indicate whether
/// xs[i] == xs[p].
static std::pair<Value, Value>
createScanLoop(OpBuilder &builder, ModuleOp module, func::FuncOp func,
               ValueRange xs, Value i, Value p, uint64_t nx, uint64_t ny,
               bool isCoo, int step) {
  Location loc = func.getLoc();
  scf::WhileOp whileOp =
      builder.create<scf::WhileOp>(loc, TypeRange{i.getType()}, ValueRange{i});

  Block *before =
      builder.createBlock(&whileOp.getBefore(), {}, {i.getType()}, {loc});
  builder.setInsertionPointToEnd(before);
  SmallVector<Value> compareOperands;
  if (step > 0) {
    compareOperands.push_back(before->getArgument(0));
    compareOperands.push_back(p);
  } else {
    assert(step < 0);
    compareOperands.push_back(p);
    compareOperands.push_back(before->getArgument(0));
  }
  compareOperands.append(xs.begin(), xs.end());
  MLIRContext *context = module.getContext();
  Type i1Type = IntegerType::get(context, 1, IntegerType::Signless);
  FlatSymbolRefAttr lessThanFunc = getMangledSortHelperFunc(
      builder, func, {i1Type}, kLessThanFuncNamePrefix, nx, ny, isCoo,
      compareOperands, createLessThanFunc);
  Value cond = builder
                   .create<func::CallOp>(loc, lessThanFunc, TypeRange{i1Type},
                                         compareOperands)
                   .getResult(0);
  builder.create<scf::ConditionOp>(loc, cond, before->getArguments());

  Block *after =
      builder.createBlock(&whileOp.getAfter(), {}, {i.getType()}, {loc});
  builder.setInsertionPointToEnd(after);
  Value cs = constantIndex(builder, loc, step);
  i = builder.create<arith::AddIOp>(loc, after->getArgument(0), cs);
  builder.create<scf::YieldOp>(loc, ValueRange{i});
  i = whileOp.getResult(0);

  builder.setInsertionPointAfter(whileOp);
  compareOperands[0] = i;
  compareOperands[1] = p;
  FlatSymbolRefAttr compareEqFunc = getMangledSortHelperFunc(
      builder, func, {i1Type}, kCompareEqFuncNamePrefix, nx, ny, isCoo,
      compareOperands, createEqCompareFunc);
  Value compareEq =
      builder
          .create<func::CallOp>(loc, compareEqFunc, TypeRange{i1Type},
                                compareOperands)
          .getResult(0);

  return std::make_pair(whileOp.getResult(0), compareEq);
}
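
// In the partition routine below, this scan is instantiated twice: once with
// step == 1 to advance i over elements less than the pivot, and once with
// step == -1 to move j backward over elements greater than the pivot; the
// returned equality flags are then used to step both indices past values that
// equal the pivot.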

/// Creates a function to perform quick sort partition on the values in the
/// range of index [lo, hi), assuming lo < hi.
//
// The generated IR corresponds to this C-like algorithm:
//   int partition(lo, hi, xs) {
//     p = (lo+hi)/2  // pivot index
//     i = lo
//     j = hi-1
//     while (i < j) {
//       while (xs[i] < xs[p]) i++;
//       i_eq = (xs[i] == xs[p]);
//       while (xs[j] > xs[p]) j--;
//       j_eq = (xs[j] == xs[p]);
//       if (i < j) {
//         swap(xs[i], xs[j])
//         if (i == p) {
//           p = j;
//         } else if (j == p) {
//           p = i;
//         }
//         if (i_eq && j_eq) {
//           ++i;
//           --j;
//         }
//       }
//     }
//     return p
//   }
static void createPartitionFunc(OpBuilder &builder, ModuleOp module,
                                func::FuncOp func, uint64_t nx, uint64_t ny,
                                bool isCoo) {
  OpBuilder::InsertionGuard insertionGuard(builder);

  Block *entryBlock = func.addEntryBlock();
  builder.setInsertionPointToStart(entryBlock);

  Location loc = func.getLoc();
  ValueRange args = entryBlock->getArguments();
  Value lo = args[loIdx];
  Value hi = args[hiIdx];
  Value sum = builder.create<arith::AddIOp>(loc, lo, hi);
  Value c1 = constantIndex(builder, loc, 1);
  Value p = builder.create<arith::ShRUIOp>(loc, sum, c1);

  Value i = lo;
  Value j = builder.create<arith::SubIOp>(loc, hi, c1);
  SmallVector<Value, 3> operands{i, j, p}; // exactly three
  SmallVector<Type, 3> types{i.getType(), j.getType(), p.getType()};
  scf::WhileOp whileOp = builder.create<scf::WhileOp>(loc, types, operands);

  // The before-region of the WhileOp.
  Block *before =
      builder.createBlock(&whileOp.getBefore(), {}, types, {loc, loc, loc});
  builder.setInsertionPointToEnd(before);
  Value cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult,
                                             before->getArgument(0),
                                             before->getArgument(1));
  builder.create<scf::ConditionOp>(loc, cond, before->getArguments());

  // The after-region of the WhileOp.
  Block *after =
      builder.createBlock(&whileOp.getAfter(), {}, types, {loc, loc, loc});
  builder.setInsertionPointToEnd(after);
  i = after->getArgument(0);
  j = after->getArgument(1);
  p = after->getArgument(2);

  uint64_t numXBuffers = isCoo ? 1 : nx;
  auto [iresult, iCompareEq] =
      createScanLoop(builder, module, func, args.slice(xStartIdx, numXBuffers),
                     i, p, nx, ny, isCoo, 1);
  i = iresult;
  auto [jresult, jCompareEq] =
      createScanLoop(builder, module, func, args.slice(xStartIdx, numXBuffers),
                     j, p, nx, ny, isCoo, -1);
  j = jresult;

  // If i < j:
  cond = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, i, j);
  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, types, cond, /*else=*/true);
  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
  SmallVector<Value> swapOperands{i, j};
  swapOperands.append(args.begin() + xStartIdx, args.end());
  createSwap(builder, loc, swapOperands, nx, ny, isCoo);
  // If the pivot is moved, update p with the new pivot.
  Value icond =
      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, i, p);
  scf::IfOp ifOpI = builder.create<scf::IfOp>(loc, TypeRange{p.getType()},
                                              icond, /*else=*/true);
  builder.setInsertionPointToStart(&ifOpI.getThenRegion().front());
  builder.create<scf::YieldOp>(loc, ValueRange{j});
  builder.setInsertionPointToStart(&ifOpI.getElseRegion().front());
  Value jcond =
      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, j, p);
  scf::IfOp ifOpJ = builder.create<scf::IfOp>(loc, TypeRange{p.getType()},
                                              jcond, /*else=*/true);
  builder.setInsertionPointToStart(&ifOpJ.getThenRegion().front());
  builder.create<scf::YieldOp>(loc, ValueRange{i});
  builder.setInsertionPointToStart(&ifOpJ.getElseRegion().front());
  builder.create<scf::YieldOp>(loc, ValueRange{p});
  builder.setInsertionPointAfter(ifOpJ);
  builder.create<scf::YieldOp>(loc, ifOpJ.getResults());
  builder.setInsertionPointAfter(ifOpI);
  Value compareEqIJ =
      builder.create<arith::AndIOp>(loc, iCompareEq, jCompareEq);
  scf::IfOp ifOp2 = builder.create<scf::IfOp>(
      loc, TypeRange{i.getType(), j.getType()}, compareEqIJ, /*else=*/true);
  builder.setInsertionPointToStart(&ifOp2.getThenRegion().front());
  Value i2 = builder.create<arith::AddIOp>(loc, i, c1);
  Value j2 = builder.create<arith::SubIOp>(loc, j, c1);
  builder.create<scf::YieldOp>(loc, ValueRange{i2, j2});
  builder.setInsertionPointToStart(&ifOp2.getElseRegion().front());
  builder.create<scf::YieldOp>(loc, ValueRange{i, j});
  builder.setInsertionPointAfter(ifOp2);
  builder.create<scf::YieldOp>(
      loc,
      ValueRange{ifOp2.getResult(0), ifOp2.getResult(1), ifOpI.getResult(0)});

  // False branch for if i < j:
  builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
  builder.create<scf::YieldOp>(loc, ValueRange{i, j, p});

  // Return for the whileOp.
  builder.setInsertionPointAfter(ifOp);
  builder.create<scf::YieldOp>(loc, ifOp.getResults());

  // Return for the function.
  builder.setInsertionPointAfter(whileOp);
  builder.create<func::ReturnOp>(loc, whileOp.getResult(2));
}

/// Creates a function to perform quick sort on the values in the range of
/// index [lo, hi).
//
// The generated IR corresponds to this C-like algorithm:
//   void quickSort(lo, hi, data) {
//     if (lo < hi) {
//       p = partition(lo, hi, data);
//       quickSort(lo, p, data);
//       quickSort(p + 1, hi, data);
//     }
//   }
static void createSortNonstableFunc(OpBuilder &builder, ModuleOp module,
                                    func::FuncOp func, uint64_t nx, uint64_t ny,
                                    bool isCoo) {
  OpBuilder::InsertionGuard insertionGuard(builder);
  Block *entryBlock = func.addEntryBlock();
  builder.setInsertionPointToStart(entryBlock);

  MLIRContext *context = module.getContext();
  Location loc = func.getLoc();
  ValueRange args = entryBlock->getArguments();
  Value lo = args[loIdx];
  Value hi = args[hiIdx];
  Value cond =
      builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ult, lo, hi);
  scf::IfOp ifOp = builder.create<scf::IfOp>(loc, cond, /*else=*/false);

  // The if-stmt true branch.
  builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
  FlatSymbolRefAttr partitionFunc = getMangledSortHelperFunc(
      builder, func, {IndexType::get(context)}, kPartitionFuncNamePrefix, nx,
      ny, isCoo, args, createPartitionFunc);
  auto p = builder.create<func::CallOp>(
      loc, partitionFunc, TypeRange{IndexType::get(context)}, ValueRange(args));

  SmallVector<Value> lowOperands{lo, p.getResult(0)};
  lowOperands.append(args.begin() + xStartIdx, args.end());
  builder.create<func::CallOp>(loc, func, lowOperands);

  SmallVector<Value> highOperands{
      builder.create<arith::AddIOp>(loc, p.getResult(0),
                                    constantIndex(builder, loc, 1)),
      hi};
  highOperands.append(args.begin() + xStartIdx, args.end());
  builder.create<func::CallOp>(loc, func, highOperands);

  // After the if-stmt.
  builder.setInsertionPointAfter(ifOp);
  builder.create<func::ReturnOp>(loc);
}
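
// Note that the recursive quickSort calls above are emitted as direct
// func.call operations on the helper function itself, so the generated sort is
// self-recursive rather than expanded inline.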

/// Creates a function to perform insertion sort on the values in the range of
/// index [lo, hi).
//
// The generated IR corresponds to this C-like algorithm:
//   void insertionSort(lo, hi, data) {
//     for (i = lo+1; i < hi; i++) {
//       d = data[i];
//       p = binarySearch(lo, i-1, data)
//       for (j = 0; j < i - p; j++)
//         data[i-j] = data[i-j-1]
//       data[p] = d
//     }
//   }
static void createSortStableFunc(OpBuilder &builder, ModuleOp module,
                                 func::FuncOp func, uint64_t nx, uint64_t ny,
                                 bool isCoo) {
  OpBuilder::InsertionGuard insertionGuard(builder);
  Block *entryBlock = func.addEntryBlock();
  builder.setInsertionPointToStart(entryBlock);

  MLIRContext *context = module.getContext();
  Location loc = func.getLoc();
  ValueRange args = entryBlock->getArguments();
  Value c1 = constantIndex(builder, loc, 1);
  Value lo = args[loIdx];
  Value hi = args[hiIdx];
  Value lop1 = builder.create<arith::AddIOp>(loc, lo, c1);

  // Start the outer for-stmt with induction variable i.
  scf::ForOp forOpI = builder.create<scf::ForOp>(loc, lop1, hi, c1);
  builder.setInsertionPointToStart(forOpI.getBody());
  Value i = forOpI.getInductionVar();

  // Binary search to find the insertion point p.
  SmallVector<Value> operands{lo, i};
  operands.append(args.begin() + xStartIdx, args.end());
  FlatSymbolRefAttr searchFunc = getMangledSortHelperFunc(
      builder, func, {IndexType::get(context)}, kBinarySearchFuncNamePrefix, nx,
      ny, isCoo, operands, createBinarySearchFunc);
  Value p = builder
                .create<func::CallOp>(loc, searchFunc, TypeRange{c1.getType()},
                                      operands)
                .getResult(0);

  // Move the value at data[i] to a temporary location.
  operands[0] = operands[1] = i;
  SmallVector<Value> d;
  forEachIJPairInAllBuffers(
      builder, loc, operands, nx, ny, isCoo,
      [&](uint64_t unused, Value i, Value unused2, Value buffer) {
        d.push_back(builder.create<memref::LoadOp>(loc, buffer, i));
      });

  // Start the inner for-stmt with induction variable j, for moving data[p..i)
  // to data[p+1..i+1).
  Value imp = builder.create<arith::SubIOp>(loc, i, p);
  Value c0 = constantIndex(builder, loc, 0);
  scf::ForOp forOpJ = builder.create<scf::ForOp>(loc, c0, imp, c1);
  builder.setInsertionPointToStart(forOpJ.getBody());
  Value j = forOpJ.getInductionVar();
  Value imj = builder.create<arith::SubIOp>(loc, i, j);
  operands[1] = imj;
  operands[0] = builder.create<arith::SubIOp>(loc, imj, c1);
  forEachIJPairInAllBuffers(
      builder, loc, operands, nx, ny, isCoo,
      [&](uint64_t unused, Value imjm1, Value imj, Value buffer) {
        Value t = builder.create<memref::LoadOp>(loc, buffer, imjm1);
        builder.create<memref::StoreOp>(loc, t, buffer, imj);
      });

  // Store the value at data[i] to data[p].
  builder.setInsertionPointAfter(forOpJ);
  operands[0] = operands[1] = p;
  forEachIJPairInAllBuffers(
      builder, loc, operands, nx, ny, isCoo,
      [&](uint64_t k, Value p, Value unused, Value buffer) {
        builder.create<memref::StoreOp>(loc, d[k], buffer, p);
      });

  builder.setInsertionPointAfter(forOpI);
  builder.create<func::ReturnOp>(loc);
}

/// Implements the rewriting for operator sort and sort_coo.
template <typename OpTy>
LogicalResult matchAndRewriteSortOp(OpTy op, ValueRange xys, uint64_t nx,
                                    uint64_t ny, bool isCoo,
                                    PatternRewriter &rewriter) {
  Location loc = op.getLoc();
  SmallVector<Value> operands{constantIndex(rewriter, loc, 0), op.getN()};

  // Convert `xys` to have dynamic shape and append them to `operands`.
  for (Value v : xys) {
    auto mtp = getMemRefType(v);
    if (!mtp.isDynamicDim(0)) {
      auto newMtp =
          MemRefType::get({ShapedType::kDynamic}, mtp.getElementType());
      v = rewriter.create<memref::CastOp>(loc, newMtp, v);
    }
    operands.push_back(v);
  }
  auto insertPoint = op->template getParentOfType<func::FuncOp>();
  SmallString<32> funcName(op.getStable() ? kSortStableFuncNamePrefix
                                          : kSortNonstableFuncNamePrefix);
  FuncGeneratorType funcGenerator =
      op.getStable() ? createSortStableFunc : createSortNonstableFunc;
  FlatSymbolRefAttr func =
      getMangledSortHelperFunc(rewriter, insertPoint, TypeRange(), funcName, nx,
                               ny, isCoo, operands, funcGenerator);
  rewriter.replaceOpWithNewOp<func::CallOp>(op, func, TypeRange(), operands);
  return success();
}

//===---------------------------------------------------------------------===//
// The actual sparse buffer rewriting rules.
//===---------------------------------------------------------------------===//

namespace {

/// Sparse rewriting rule for the push_back operator.
struct PushBackRewriter : OpRewritePattern<PushBackOp> {
public:
  using OpRewritePattern<PushBackOp>::OpRewritePattern;
  PushBackRewriter(MLIRContext *context, bool enableInit)
      : OpRewritePattern(context), enableBufferInitialization(enableInit) {}
  LogicalResult matchAndRewrite(PushBackOp op,
                                PatternRewriter &rewriter) const override {
    // Rewrite push_back(buffer, value, n) to:
    //   new_size = size(buffer) + n
    //   if (new_size > capacity(buffer))
    //     while (new_size > new_capacity)
    //       new_capacity = new_capacity * 2
    //     new_buffer = realloc(buffer, new_capacity)
    //     buffer = new_buffer
    //   subBuffer = subviewof(buffer)
    //   linalg.fill subBuffer value
    //   size(buffer) += n
    //
    // The capacity check is skipped when the inbounds attribute is present.
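    //
    // For example (illustrative): pushing n = 5 values onto a buffer with
    // capacity 8 and current size 6 gives new_size = 11, so the capacity is
    // doubled once to 16 before the realloc; with n = 1 the doubling is done
    // exactly once without emitting the loop.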
    Location loc = op->getLoc();
    Value c0 = constantIndex(rewriter, loc, 0);
    Value buffer = op.getInBuffer();
    Value capacity = rewriter.create<memref::DimOp>(loc, buffer, c0);
    Value size = op.getCurSize();
    Value value = op.getValue();

    Value n = op.getN() ? op.getN() : constantIndex(rewriter, loc, 1);
    Value newSize = rewriter.create<arith::AddIOp>(loc, size, n);
    auto nValue = dyn_cast_or_null<arith::ConstantIndexOp>(n.getDefiningOp());
    bool nIsOne = (nValue && nValue.value() == 1);

    if (!op.getInbounds()) {
      Value cond = rewriter.create<arith::CmpIOp>(
          loc, arith::CmpIPredicate::ugt, newSize, capacity);

      Value c2 = constantIndex(rewriter, loc, 2);
      auto bufferType =
          MemRefType::get({ShapedType::kDynamic}, value.getType());
      scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, bufferType, cond,
                                                  /*else=*/true);
      // True branch.
      rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
      if (nIsOne) {
        capacity = rewriter.create<arith::MulIOp>(loc, capacity, c2);
      } else {
        // Use a do-while loop to calculate the new capacity as follows:
        //   do { new_capacity *= 2 } while (new_size > new_capacity)
        scf::WhileOp whileOp =
            rewriter.create<scf::WhileOp>(loc, capacity.getType(), capacity);

        // The before-region of the WhileOp.
        Block *before = rewriter.createBlock(&whileOp.getBefore(), {},
                                             {capacity.getType()}, {loc});
        rewriter.setInsertionPointToEnd(before);

        capacity =
            rewriter.create<arith::MulIOp>(loc, before->getArgument(0), c2);
        cond = rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::ugt,
                                              newSize, capacity);
        rewriter.create<scf::ConditionOp>(loc, cond, ValueRange{capacity});
        // The after-region of the WhileOp.
        Block *after = rewriter.createBlock(&whileOp.getAfter(), {},
                                            {capacity.getType()}, {loc});
        rewriter.setInsertionPointToEnd(after);
        rewriter.create<scf::YieldOp>(loc, after->getArguments());

        rewriter.setInsertionPointAfter(whileOp);
        capacity = whileOp.getResult(0);
      }

      Value newBuffer =
          rewriter.create<memref::ReallocOp>(loc, bufferType, buffer, capacity);
      if (enableBufferInitialization) {
        Value fillSize = rewriter.create<arith::SubIOp>(loc, capacity, newSize);
        Value fillValue = constantZero(rewriter, loc, value.getType());
        Value subBuffer = rewriter.create<memref::SubViewOp>(
            loc, newBuffer, /*offset=*/ValueRange{newSize},
            /*size=*/ValueRange{fillSize},
            /*step=*/ValueRange{constantIndex(rewriter, loc, 1)});
        rewriter.create<linalg::FillOp>(loc, fillValue, subBuffer);
      }
      rewriter.create<scf::YieldOp>(loc, newBuffer);

      // False branch.
      rewriter.setInsertionPointToStart(&ifOp.getElseRegion().front());
      rewriter.create<scf::YieldOp>(loc, buffer);

      // Prepare for adding the value to the end of the buffer.
      rewriter.setInsertionPointAfter(ifOp);
      buffer = ifOp.getResult(0);
    }

    // Add the value to the end of the buffer.
    if (nIsOne) {
      rewriter.create<memref::StoreOp>(loc, value, buffer, size);
    } else {
      Value subBuffer = rewriter.create<memref::SubViewOp>(
          loc, buffer, /*offset=*/ValueRange{size}, /*size=*/ValueRange{n},
          /*step=*/ValueRange{constantIndex(rewriter, loc, 1)});
      rewriter.create<linalg::FillOp>(loc, value, subBuffer);
    }

    // Update the buffer size.
    rewriter.replaceOp(op, {buffer, newSize});
    return success();
  }

private:
  bool enableBufferInitialization;
};

/// Sparse rewriting rule for the sort operator.
struct SortRewriter : public OpRewritePattern<SortOp> {
public:
  using OpRewritePattern<SortOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SortOp op,
                                PatternRewriter &rewriter) const override {
    SmallVector<Value> xys(op.getXs());
    xys.append(op.getYs().begin(), op.getYs().end());
    return matchAndRewriteSortOp(op, xys, op.getXs().size(), /*ny=*/0,
                                 /*isCoo=*/false, rewriter);
  }
};

/// Sparse rewriting rule for the sort_coo operator.
struct SortCooRewriter : public OpRewritePattern<SortCooOp> {
public:
  using OpRewritePattern<SortCooOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SortCooOp op,
                                PatternRewriter &rewriter) const override {
    SmallVector<Value> xys;
    xys.push_back(op.getXy());
    xys.append(op.getYs().begin(), op.getYs().end());
    uint64_t nx = 1;
    if (auto nxAttr = op.getNxAttr())
      nx = nxAttr.getInt();

    uint64_t ny = 0;
    if (auto nyAttr = op.getNyAttr())
      ny = nyAttr.getInt();

    return matchAndRewriteSortOp(op, xys, nx, ny,
                                 /*isCoo=*/true, rewriter);
  }
};

} // namespace

//===---------------------------------------------------------------------===//
// Methods that add patterns described in this file to a pattern list.
//===---------------------------------------------------------------------===//

void mlir::populateSparseBufferRewriting(RewritePatternSet &patterns,
                                         bool enableBufferInitialization) {
  patterns.add<PushBackRewriter>(patterns.getContext(),
                                 enableBufferInitialization);
  patterns.add<SortRewriter, SortCooRewriter>(patterns.getContext());
}
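
// Example usage (illustrative, not part of this file): a pass that wants these
// rewrites would typically populate and apply them greedily, e.g.
//   RewritePatternSet patterns(&getContext());
//   populateSparseBufferRewriting(patterns, /*enableBufferInitialization=*/false);
//   (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));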