//===- SparseGPUCodegen.cpp - Generates GPU code --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a prototype GPU code generator for the sparse compiler.
// The objective is to eventually use the right combination of
// direct code generation and library calls into vendor-specific
// highly optimized sparse libraries (e.g. cuSparse for CUDA).
//
//===----------------------------------------------------------------------===//

#include "CodegenUtils.h"
#include "LoopEmitter.h"

#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensorType.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Matchers.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// Helper methods.
//===----------------------------------------------------------------------===//

/// Marks the given top module as a GPU container module.
static void markAsGPUContainer(ModuleOp topModule) {
  topModule->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
                     UnitAttr::get(topModule->getContext()));
}

/// Constructs a new GPU module (for GPU kernels) inside the given top module,
/// or returns an existing GPU module if one was built previously.
static gpu::GPUModuleOp genGPUModule(OpBuilder &builder, ModuleOp topModule) {
  for (auto op : topModule.getBodyRegion().getOps<gpu::GPUModuleOp>())
    return op; // existing
  markAsGPUContainer(topModule);
  builder.setInsertionPointToStart(&topModule.getBodyRegion().front());
  return builder.create<gpu::GPUModuleOp>(topModule->getLoc(),
                                          "sparse_kernels");
}

/// Constructs a new GPU kernel in the given GPU module.
static gpu::GPUFuncOp genGPUFunc(OpBuilder &builder, gpu::GPUModuleOp gpuModule,
                                 SmallVectorImpl<Value> &args) {
  // Get a unique kernel name. Not very creative,
  // but we simply try kernel0, kernel1, etc.
  unsigned kernelNumber = 0;
  SmallString<16> kernelName;
  do {
    kernelName.clear();
    ("kernel" + Twine(kernelNumber++)).toStringRef(kernelName);
  } while (gpuModule.lookupSymbol(kernelName));
  // Then we insert a new kernel with the given arguments into the module.
  builder.setInsertionPointToStart(&gpuModule.getBodyRegion().front());
  SmallVector<Type> argsTp;
  for (unsigned i = 0, e = args.size(); i < e; i++)
    argsTp.push_back(args[i].getType());
  FunctionType type = FunctionType::get(gpuModule->getContext(), argsTp, {});
  auto gpuFunc =
      builder.create<gpu::GPUFuncOp>(gpuModule->getLoc(), kernelName, type);
  gpuFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
                   builder.getUnitAttr());
  return gpuFunc;
}

/// Constructs code to launch GPU kernel.
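/// As a rough illustration (a sketch only; the exact operand list, types, and
/// attributes depend on the outlined kernel and the requested thread count),
/// the emitted launch looks like:
///
///   %token = gpu.launch_func async [%deps] @sparse_kernels::@kernel0
///            blocks in (%c1, %c1, %c1) threads in (%numT, %c1, %c1)
///            args(%s0 : index, %buf0 : memref<?xf64>, ...)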
static Value genLaunchGPUFunc(OpBuilder &builder, gpu::GPUFuncOp gpuFunc,
                              SmallVectorImpl<Value> &args,
                              SmallVectorImpl<Value> &tokens,
                              unsigned numThreads) {
  Location loc = gpuFunc->getLoc();
  Value none = TypedValue<::mlir::IntegerType>{};
  Value one = constantIndex(builder, loc, 1);
  Value numT = constantIndex(builder, loc, numThreads);
  gpu::KernelDim3 gridSize = {one, one, one};
  gpu::KernelDim3 blckSize = {numT, one, one};
  return builder
      .create<gpu::LaunchFuncOp>(loc, gpuFunc, gridSize, blckSize,
                                 /*dynSharedMemSz*/ none, args,
                                 builder.getType<gpu::AsyncTokenType>(), tokens)
      .getAsyncToken();
}

/// Maps the provided ranked host buffer into the device address space.
/// Writes from the host are guaranteed to be visible to device kernels
/// that are launched afterwards. Writes from the device are guaranteed
/// to be visible on the host after synchronizing with the device kernel
/// completion. Needs to cast the buffer to an unranked buffer.
static Value genHostRegisterMemref(OpBuilder &builder, Location loc,
                                   Value mem) {
  MemRefType memTp = cast<MemRefType>(mem.getType());
  UnrankedMemRefType resTp =
      UnrankedMemRefType::get(memTp.getElementType(), /*memorySpace=*/0);
  Value cast = builder.create<memref::CastOp>(loc, resTp, mem);
  builder.create<gpu::HostRegisterOp>(loc, cast);
  return cast;
}

/// Unmaps the provided buffer, expecting the cast buffer returned by
/// genHostRegisterMemref.
static void genHostUnregisterMemref(OpBuilder &builder, Location loc,
                                    Value cast) {
  builder.create<gpu::HostUnregisterOp>(loc, cast);
}

/// Generates first wait in an asynchronous chain.
static Value genFirstWait(OpBuilder &builder, Location loc) {
  Type tokenType = builder.getType<gpu::AsyncTokenType>();
  return builder.create<gpu::WaitOp>(loc, tokenType, ValueRange())
      .getAsyncToken();
}

/// Generates last, blocking wait in an asynchronous chain.
static void genBlockingWait(OpBuilder &builder, Location loc,
                            ValueRange operands) {
  builder.create<gpu::WaitOp>(loc, Type(), operands);
}

/// Allocates memory on the device.
/// TODO: A `host_shared` attribute could be used to indicate that
/// the buffer is visible by both host and device, but lowering
/// that feature does not seem to be fully supported yet.
static gpu::AllocOp genAllocMemRef(OpBuilder &builder, Location loc, Value mem,
                                   Value token) {
  auto tp = cast<ShapedType>(mem.getType());
  auto elemTp = tp.getElementType();
  auto shape = tp.getShape();
  auto memTp = MemRefType::get(shape, elemTp);
  SmallVector<Value> dynamicSizes;
  for (unsigned r = 0, rank = tp.getRank(); r < rank; r++) {
    if (shape[r] == ShapedType::kDynamic) {
      Value dimOp = linalg::createOrFoldDimOp(builder, loc, mem, r);
      dynamicSizes.push_back(dimOp);
    }
  }
  return builder.create<gpu::AllocOp>(loc, TypeRange({memTp, token.getType()}),
                                      token, dynamicSizes, ValueRange());
}

/// Allocates a typed buffer on the host with given size.
static Value genHostBuffer(OpBuilder &builder, Location loc, Type type,
                           Value size) {
  const auto memTp = MemRefType::get({ShapedType::kDynamic}, type);
  return builder.create<memref::AllocOp>(loc, memTp, size).getResult();
}

/// Allocates a typed buffer on the device with given size.
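/// A call such as genAllocBuffer(builder, loc, f32Tp, %n, %t) emits, roughly
/// (illustrative sketch only; the element type is whatever is passed in):
///
///   %buf, %t2 = gpu.alloc async [%t] (%n) : memref<?xf32>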
static gpu::AllocOp genAllocBuffer(OpBuilder &builder, Location loc, Type type,
                                   Value size, Value token) {
  const auto memTp = MemRefType::get({ShapedType::kDynamic}, type);
  return builder.create<gpu::AllocOp>(loc, TypeRange({memTp, token.getType()}),
                                      token, size, ValueRange());
}

/// Allocates a void buffer on the device with given size.
static gpu::AllocOp genAllocBuffer(OpBuilder &builder, Location loc, Value size,
                                   Value token) {
  return genAllocBuffer(builder, loc, builder.getI8Type(), size, token);
}

/// Deallocates memory from the device.
static Value genDeallocMemRef(OpBuilder &builder, Location loc, Value mem,
                              Value token) {
  return builder.create<gpu::DeallocOp>(loc, token.getType(), token, mem)
      .getAsyncToken();
}

/// Copies memory between host and device (direction is implicit).
static Value genCopyMemRef(OpBuilder &builder, Location loc, Value dst,
                           Value src, Value token) {
  return builder.create<gpu::MemcpyOp>(loc, token.getType(), token, dst, src)
      .getAsyncToken();
}

/// Generates an alloc/copy pair.
static Value genAllocCopy(OpBuilder &builder, Location loc, Value b,
                          SmallVectorImpl<Value> &tokens) {
  Value firstToken = genFirstWait(builder, loc);
  auto alloc = genAllocMemRef(builder, loc, b, firstToken);
  Value devMem = alloc.getResult(0);
  Value depToken = alloc.getAsyncToken(); // copy-after-alloc
  tokens.push_back(genCopyMemRef(builder, loc, devMem, b, depToken));
  return devMem;
}

/// Generates a memref from the given tensor (through a bufferization.to_memref
/// operation).
static Value genTensorToMemref(PatternRewriter &rewriter, Location loc,
                               Value tensor) {
  auto tensorType = llvm::cast<ShapedType>(tensor.getType());
  auto memrefType =
      MemRefType::get(tensorType.getShape(), tensorType.getElementType());
  return rewriter.create<bufferization::ToMemrefOp>(loc, memrefType, tensor);
}

/// Prepares the outlined arguments, passing scalars and buffers in. Here we
/// assume that the first buffer is the one allocated for output. We create
/// a set of properly chained asynchronous allocation/copy pairs to increase
/// overlap before launching the kernel.
static Value genParametersIn(OpBuilder &builder, Location loc,
                             SmallVectorImpl<Value> &scalars,
                             SmallVectorImpl<Value> &buffers,
                             SmallVectorImpl<Value> &args,
                             SmallVectorImpl<Value> &tokens,
                             bool useHostRegistrationForOut) {
  Value out;
  // Scalars are passed by value.
  for (Value s : scalars)
    args.push_back(s);
  // Buffers need to be made visible on the device.
  for (Value b : buffers) {
    if (useHostRegistrationForOut) {
      out = genHostRegisterMemref(builder, loc, b);
      args.push_back(b);
      useHostRegistrationForOut = false;
      continue;
    }
    args.push_back(genAllocCopy(builder, loc, b, tokens));
  }
  return out;
}

/// Finalizes the outlined arguments. The output buffer is copied depending
/// on the kernel token and then deallocated. All other buffers are simply
/// deallocated. Then we wait for all operations to complete.
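/// By convention, the first entry in `buffers` (and thus the first buffer
/// argument after the scalars in `args`) is treated as the output buffer.
/// For that buffer this yields, roughly (sketch only; names are placeholders):
///
///   %t0 = gpu.memcpy async [%kernelToken] %hostOut, %devOut : ...
///   %t1 = gpu.dealloc async [%t0] %devOut : ...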
static void genParametersOut(OpBuilder &builder, Location loc, Value out,
                             Value kernelToken, SmallVectorImpl<Value> &scalars,
                             SmallVectorImpl<Value> &buffers,
                             SmallVectorImpl<Value> &args,
                             SmallVectorImpl<Value> &tokens) {
  unsigned base = scalars.size();
  for (unsigned i = base, e = args.size(); i < e; i++) {
    Value firstToken;
    if (i == base) {
      // Assumed output parameter: unregister or copy-out.
      if (out) {
        genHostUnregisterMemref(builder, loc, out);
        out = Value();
        continue;
      }
      firstToken =
          genCopyMemRef(builder, loc, buffers[0], args[i], kernelToken);
    } else {
      firstToken = genFirstWait(builder, loc);
    }
    tokens.push_back(genDeallocMemRef(builder, loc, args[i], firstToken));
  }
}

/// Constructs code for a new GPU kernel.
static void genGPUCode(PatternRewriter &rewriter, gpu::GPUFuncOp gpuFunc,
                       scf::ParallelOp forallOp,
                       SmallVectorImpl<Value> &constants,
                       SmallVectorImpl<Value> &scalars,
                       SmallVectorImpl<Value> &buffers) {
  Location loc = gpuFunc->getLoc();
  Block &block = gpuFunc.getBody().front();
  rewriter.setInsertionPointToStart(&block);

  // Re-generate the constants, recapture all arguments.
  unsigned arg = 0;
  IRMapping irMap;
  for (Value c : constants)
    irMap.map(c, rewriter.clone(*c.getDefiningOp())->getResult(0));
  for (Value s : scalars)
    irMap.map(s, block.getArgument(arg++));
  for (Value b : buffers)
    irMap.map(b, block.getArgument(arg++));

  // Assume 1-dimensional grid/block configuration (only x dimension),
  // so that:
  //   row = blockIdx.x * blockDim.x + threadIdx.x
  //   inc = blockDim.x * gridDim.x
  Value bid = rewriter.create<gpu::BlockIdOp>(loc, gpu::Dimension::x);
  Value bsz = rewriter.create<gpu::BlockDimOp>(loc, gpu::Dimension::x);
  Value tid = rewriter.create<gpu::ThreadIdOp>(loc, gpu::Dimension::x);
  Value gsz = rewriter.create<gpu::GridDimOp>(loc, gpu::Dimension::x);
  Value mul = rewriter.create<arith::MulIOp>(loc, bid, bsz);
  Value row = rewriter.create<arith::AddIOp>(loc, mul, tid);
  Value inc = rewriter.create<arith::MulIOp>(loc, bsz, gsz);

  // Construct the iteration over the computational space that
  // accounts for the fact that the total number of threads and
  // the amount of work to be done usually do not match precisely.
  //   for (r = row; r < N; r += inc) {
  //     <loop-body>
  //   }
  Value upper = irMap.lookup(forallOp.getUpperBound()[0]);
  scf::ForOp forOp = rewriter.create<scf::ForOp>(loc, row, upper, inc);
  rewriter.cloneRegionBefore(forallOp.getLoopBody(), forOp.getLoopBody(),
                             forOp.getLoopBody().begin(), irMap);

  // Done.
  rewriter.setInsertionPointAfter(forOp);
  rewriter.create<gpu::ReturnOp>(gpuFunc->getLoc());
}

//===----------------------------------------------------------------------===//
// Library helper methods.
//===----------------------------------------------------------------------===//

/// Helper to detect a + b with arguments taken from the given block.
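/// Both operand orders are accepted, i.e. `a + b` as well as `b + a`
/// (for integer and floating-point addition alike).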
static bool matchAddOfArgs(Block *block, Value val) {
  if (auto *def = val.getDefiningOp()) {
    if (isa<arith::AddFOp, arith::AddIOp>(def)) {
      Value a = block->getArguments()[0];
      Value b = block->getArguments()[1];
      return (def->getOperand(0) == a && def->getOperand(1) == b) ||
             (def->getOperand(0) == b && def->getOperand(1) == a);
    }
  }
  return false;
}

/// Helper to detect a * b with arguments taken from the given block.
static bool matchMulOfArgs(Block *block, Value val) {
  if (auto *def = val.getDefiningOp()) {
    if (isa<arith::MulFOp, arith::MulIOp>(def)) {
      Value a = block->getArguments()[0];
      Value b = block->getArguments()[1];
      return (def->getOperand(0) == a && def->getOperand(1) == b) ||
             (def->getOperand(0) == b && def->getOperand(1) == a);
    }
  }
  return false;
}

/// Helper to detect x = x + a * b.
static bool matchSumOfMultOfArgs(linalg::GenericOp op) {
  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
  if (auto *def = yieldOp.getOperand(0).getDefiningOp()) {
    if (isa<arith::AddFOp, arith::AddIOp>(def)) {
      Value x = op.getBlock()->getArguments()[2];
      return (def->getOperand(0) == x &&
              matchMulOfArgs(op.getBlock(), def->getOperand(1))) ||
             (def->getOperand(1) == x &&
              matchMulOfArgs(op.getBlock(), def->getOperand(0)));
    }
  }
  return false;
}

/// Helper to detect c += spy(s) x (a * b).
static bool matchSumReductionOfMulUnary(linalg::GenericOp op) {
  auto yieldOp = cast<linalg::YieldOp>(op.getRegion().front().getTerminator());
  // The linalg yields a custom reduce result.
  Value s_out = op.getBlock()->getArguments()[2];
  if (auto redOp =
          yieldOp.getOperand(0).getDefiningOp<sparse_tensor::ReduceOp>()) {
    // The reduce consumes the output.
    Value other;
    if (s_out == redOp->getOperand(0))
      other = redOp->getOperand(1);
    else if (s_out == redOp->getOperand(1))
      other = redOp->getOperand(0);
    else
      return false;
    // The reduce op also consumes a unary that consumes the output
    // and does not define an absent value.
    if (auto unOp = other.getDefiningOp<sparse_tensor::UnaryOp>()) {
      if (s_out != unOp->getOperand(0) || !unOp.getAbsentRegion().empty())
        return false;
      // And the bodies are as expected.
      auto yieldUn = cast<sparse_tensor::YieldOp>(
          unOp.getRegion(0).front().getTerminator());
      auto yieldRed = cast<sparse_tensor::YieldOp>(
          redOp.getRegion().front().getTerminator());
      return matchMulOfArgs(op.getBlock(), yieldUn.getOperand(0)) &&
             matchAddOfArgs(&redOp.getRegion().front(), yieldRed.getOperand(0));
    }
  }
  return false;
}

/// Determines if the given value is a dense tensor instead of a sparse one.
static bool isDenseTensor(Value v) {
  return (sparse_tensor::getSparseTensorType(v).isAllDense());
}

/// Test for sorted COO with suitable data and coordinate types.
static bool isAdmissibleCOO(SparseTensorType &aTp) {
  return aTp.isCompressedLvl(0) && aTp.isOrderedLvl(0) && !aTp.isUniqueLvl(0) &&
         aTp.isSingletonLvl(1) && aTp.isOrderedLvl(1) && aTp.isUniqueLvl(1) &&
         (aTp.getElementType().isF64() || aTp.getElementType().isF32()) &&
         (aTp.getCrdWidth() == 0 || aTp.getCrdWidth() == 32 ||
          aTp.getCrdWidth() == 64);
}

/// Test for CSR with suitable data and coordinate types.
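/// An admissible operand is, for instance, an f32/f64 matrix carrying a
/// CSR-style sparse encoding along the lines of (sketch only; the exact
/// attribute syntax depends on the sparse_tensor dialect version in use):
///
///   #CSR = #sparse_tensor.encoding<{ lvlTypes = ["dense", "compressed"] }>
///   ... tensor<?x?xf64, #CSR> ...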
static bool isAdmissibleCSR(SparseTensorType &aTp) {
  return aTp.isDenseLvl(0) && aTp.isCompressedLvl(1) && aTp.isOrderedLvl(1) &&
         aTp.isUniqueLvl(1) &&
         (aTp.getElementType().isF64() || aTp.getElementType().isF32()) &&
         (aTp.getCrdWidth() == 0 || aTp.getCrdWidth() == 32 ||
          aTp.getCrdWidth() == 64);
}

/// Test for admissible types on operands (with output parameter `isCOO`).
static bool areAdmissibleTypes(SparseTensorType aTp, SparseTensorType bTp,
                               SparseTensorType cTp, bool enableRT,
                               bool isMatVec, bool &isCOO) {
  if (bTp.hasEncoding() || cTp.hasEncoding())
    return false;
  if (isAdmissibleCOO(aTp)) {
    isCOO = true;
#ifdef CUSPARSE_COO_AOS
    return isMatVec;
#else
    return enableRT;
#endif
  }
  return isAdmissibleCSR(aTp);
}

/// Generates the first positions/coordinates of a sparse matrix.
static Value genFirstPosOrCrds(OpBuilder &builder, Location loc, Value a,
                               bool isCOO, bool enableRT) {
  if (isCOO) {
    // Library uses SoA COO, direct IR uses AoS COO.
    if (enableRT)
      return genToCoordinates(builder, loc, a, 0, /*cooStart=*/0);
    return genToCoordinatesBuffer(builder, loc, a);
  }
  // CSR uses positions.
  return genToPositions(builder, loc, a, 1);
}

/// Generates the second coordinates of a sparse matrix.
static Value genSecondCrds(OpBuilder &builder, Location loc, Value a,
                           bool isCOO, bool enableRT) {
  if (isCOO && !enableRT)
    return Value(); // nothing needed
  return genToCoordinates(builder, loc, a, 1, /*cooStart=*/isCOO ? 0 : 2);
}

/// Generates the sparse matrix handle (COO or CSR) for the library calls.
static Operation *genSpMat(OpBuilder &builder, Location loc, Type handleTp,
                           Type tokenTp, Value token, Value sz1, Value sz2,
                           Value nseA, Value rowA, Value colA, Value valA,
                           bool isCOO, bool enableRT) {
  if (isCOO) {
    // Library uses SoA COO, direct IR uses AoS COO.
    if (enableRT) {
      assert(colA);
      return builder.create<gpu::CreateCooOp>(loc, handleTp, tokenTp, token,
                                              sz1, sz2, nseA, rowA, colA, valA);
    }
#ifdef CUSPARSE_COO_AOS
    assert(!colA);
    return builder.create<gpu::CreateCooAoSOp>(loc, handleTp, tokenTp, token,
                                               sz1, sz2, nseA, rowA, valA);
#else
    llvm_unreachable("gpu::CreateCooAoSOp is deprecated");
#endif
  }
  assert(colA);
  return builder.create<gpu::CreateCsrOp>(loc, handleTp, tokenTp, token, sz1,
                                          sz2, nseA, rowA, colA, valA);
}

/// Match and rewrite SpMV kernel.
static LogicalResult
rewriteSpMV(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
            GPUDataTransferStrategy gpuDataTransferStrategy) {
  Location loc = op.getLoc();
  Value a = op.getOperand(0);
  Value x = op.getOperand(1);
  Value y = op.getOperand(2); // we have y = Ax
  SmallVector<Value> tokens;

  bool isZeroCopy =
      gpuDataTransferStrategy == GPUDataTransferStrategy::kZeroCopy;

  // Only admissible sparse matrix format and dense vectors.
  bool isCOO = false;
  SparseTensorType aTp = getSparseTensorType(a);
  SparseTensorType xTp = getSparseTensorType(x);
  SparseTensorType yTp = getSparseTensorType(y);
  if (!areAdmissibleTypes(aTp, xTp, yTp, enableRT, /*isMatVec=*/true, isCOO))
    return failure();

  // Start sparse kernel and copy data from host to device.
  //   a : memR/memC/memV -> rowA,colA,valA
  //   x : memX -> vecX
  //   y : memY -> vecY
  Value nseA = rewriter.create<NumberOfEntriesOp>(loc, a);
  Value szY = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
  Value szX = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
  Value memR = genFirstPosOrCrds(rewriter, loc, a, isCOO, enableRT);
  Value memC = genSecondCrds(rewriter, loc, a, isCOO, enableRT);
  Value memV = genToValues(rewriter, loc, a);
  Value memX, memY;
  Value castR, castC, castV, castX, castY;
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    memX = genTensorToMemref(rewriter, loc, x);
    memY = genTensorToMemref(rewriter, loc, y);
    castR = genHostRegisterMemref(rewriter, loc, memR);
    if (memC)
      castC = genHostRegisterMemref(rewriter, loc, memC);
    castV = genHostRegisterMemref(rewriter, loc, memV);
    castX = genHostRegisterMemref(rewriter, loc, memX);
    castY = genHostRegisterMemref(rewriter, loc, memY);
  }

  Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
  Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
  Value valA = genAllocCopy(rewriter, loc, memV, tokens);
  if (gpuDataTransferStrategy == GPUDataTransferStrategy::kRegularDMA)
    memX = genTensorToMemref(rewriter, loc, x);
  Value vecX = isZeroCopy ? memX : genAllocCopy(rewriter, loc, memX, tokens);
  if (gpuDataTransferStrategy == GPUDataTransferStrategy::kRegularDMA)
    memY = genTensorToMemref(rewriter, loc, y);
  Value vecY = genAllocCopy(rewriter, loc, memY, tokens);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();

  // Create sparse environment and sparse matrix/dense vector handles.
  Type indexTp = rewriter.getIndexType();
  Type dnTensorHandleTp = rewriter.getType<gpu::SparseDnTensorHandleType>();
  Type spmatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
  Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
  Value token = genFirstWait(rewriter, loc);
  Operation *spGenA =
      genSpMat(rewriter, loc, spmatHandleTp, tokenTp, token, szY, szX, nseA,
               rowA, colA, valA, isCOO, enableRT);
  Value spMatA = spGenA->getResult(0);
  token = spGenA->getResult(1);
  auto dvecX = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnTensorHandleTp, tokenTp, token, vecX, szX);
  Value dnX = dvecX.getResult(0);
  token = dvecX.getAsyncToken();
  auto dvecY = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnTensorHandleTp, tokenTp, token, vecY, szY);
  Value dnY = dvecY.getResult(0);
  token = dvecY.getAsyncToken();

  auto dnYType = llvm::cast<ShapedType>(y.getType()).getElementType();

  // Precompute buffer size for SpMV.
  auto bufferComp = rewriter.create<gpu::SpMVBufferSizeOp>(
      loc, indexTp, tokenTp, token, spMatA, dnX, dnY,
      /*computeType=*/dnYType);
  Value bufferSz = bufferComp.getResult(0);
  token = bufferComp.getAsyncToken();
  auto buf = genAllocBuffer(rewriter, loc, bufferSz, token);
  Value buffer = buf.getResult(0);
  token = buf.getAsyncToken();

  // Perform the SpMV.
  auto spmvComp = rewriter.create<gpu::SpMVOp>(
      loc, tokenTp, token, spMatA, dnX, dnY, /*computeType=*/dnYType, buffer);
  token = spmvComp.getAsyncToken();

  // Copy data back to host and free all the resources.
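  // (Handles are destroyed first, then device buffers are deallocated in
  // allocation order; only the result vector is copied back to the host.)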
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnX)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnY)
              .getAsyncToken();
  token = genDeallocMemRef(rewriter, loc, rowA, token);
  if (colA)
    token = genDeallocMemRef(rewriter, loc, colA, token);
  token = genDeallocMemRef(rewriter, loc, valA, token);
  token = genDeallocMemRef(rewriter, loc, buffer, token);
  if (!isZeroCopy)
    token = genDeallocMemRef(rewriter, loc, vecX, token);
  token = genCopyMemRef(rewriter, loc, memY, vecY, token);
  token = genDeallocMemRef(rewriter, loc, vecY, token);
  tokens.push_back(token);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    genHostUnregisterMemref(rewriter, loc, castR);
    if (memC)
      genHostUnregisterMemref(rewriter, loc, castC);
    genHostUnregisterMemref(rewriter, loc, castV);
    genHostUnregisterMemref(rewriter, loc, castX);
    genHostUnregisterMemref(rewriter, loc, castY);
  }

  // Done.
  rewriter.replaceOpWithNewOp<bufferization::ToTensorOp>(op, memY);
  return success();
}

/// Match and rewrite SpMM kernel.
static LogicalResult
rewriteSpMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
            GPUDataTransferStrategy gpuDataTransferStrategy) {
  Location loc = op.getLoc();
  Value a = op.getOperand(0);
  Value b = op.getOperand(1);
  Value c = op.getOperand(2); // we have C = AB
  SmallVector<Value> tokens;

  bool isZeroCopy =
      gpuDataTransferStrategy == GPUDataTransferStrategy::kZeroCopy;

  // Only admissible sparse matrix format and dense matrices.
  bool isCOO = false;
  SparseTensorType aTp = getSparseTensorType(a);
  SparseTensorType bTp = getSparseTensorType(b);
  SparseTensorType cTp = getSparseTensorType(c);
  if (!areAdmissibleTypes(aTp, bTp, cTp, enableRT, /*isMatVec=*/false, isCOO))
    return failure();

  // Start sparse kernel and copy data from host to device.
  //   a : memR/memC/memV -> rowA,colA,valA
  //   b : bufB -> matB
  //   c : bufC -> matC
  Value nseA = rewriter.create<NumberOfEntriesOp>(loc, a);
  Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
  Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
  Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
  Value memR = genFirstPosOrCrds(rewriter, loc, a, isCOO, enableRT);
  Value memC = genSecondCrds(rewriter, loc, a, isCOO, enableRT);
  Value memV = genToValues(rewriter, loc, a);
  Value bufB, bufC;
  Value castR, castC, castV, castB, castBufC;
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    bufB = genTensorToMemref(rewriter, loc, b);
    bufC = genTensorToMemref(rewriter, loc, c);
    castR = genHostRegisterMemref(rewriter, loc, memR);
    if (memC)
      castC = genHostRegisterMemref(rewriter, loc, memC);
    castV = genHostRegisterMemref(rewriter, loc, memV);
    castB = genHostRegisterMemref(rewriter, loc, bufB);
    castBufC = genHostRegisterMemref(rewriter, loc, bufC);
  }
  Value rowA = genAllocCopy(rewriter, loc, memR, tokens);
  Value colA = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
  Value valA = genAllocCopy(rewriter, loc, memV, tokens);
  if (gpuDataTransferStrategy == GPUDataTransferStrategy::kRegularDMA)
    bufB = genTensorToMemref(rewriter, loc, b);
  Value matB = isZeroCopy ? bufB : genAllocCopy(rewriter, loc, bufB, tokens);
  if (gpuDataTransferStrategy == GPUDataTransferStrategy::kRegularDMA)
    bufC = genTensorToMemref(rewriter, loc, c);
  Value matC = genAllocCopy(rewriter, loc, bufC, tokens);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();

  // Create sparse environment and sparse matrix/dense matrix handles.
  Type indexTp = rewriter.getIndexType();
  Type dnTensorHandleTp = rewriter.getType<gpu::SparseDnTensorHandleType>();
  Type spMatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
  Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
  Value token = genFirstWait(rewriter, loc);
  Operation *spGenA =
      genSpMat(rewriter, loc, spMatHandleTp, tokenTp, token, szm, szk, nseA,
               rowA, colA, valA, isCOO, enableRT);
  Value spMatA = spGenA->getResult(0);
  token = spGenA->getResult(1);
  auto dmatB = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnTensorHandleTp, tokenTp, token, matB,
      SmallVector<Value>{szk, szn});
  Value dnB = dmatB.getResult(0);
  token = dmatB.getAsyncToken();
  auto dmatC = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnTensorHandleTp, tokenTp, token, matC,
      SmallVector<Value>{szm, szn});
  Value dnC = dmatC.getResult(0);
  token = dmatC.getAsyncToken();

  auto dmatCType = llvm::cast<ShapedType>(c.getType()).getElementType();

  // Precompute buffer size for SpMM.
  auto bufferComp = rewriter.create<gpu::SpMMBufferSizeOp>(
      loc, indexTp, tokenTp, token, spMatA, dnB, dnC,
      /*computeType=*/dmatCType);
  Value bufferSz = bufferComp.getResult(0);
  token = bufferComp.getAsyncToken();
  auto buf = genAllocBuffer(rewriter, loc, bufferSz, token);
  Value buffer = buf.getResult(0);
  token = buf.getAsyncToken();

  auto dnCType = llvm::cast<ShapedType>(c.getType()).getElementType();

  // Perform the SpMM.
  auto spmmComp = rewriter.create<gpu::SpMMOp>(
      loc, tokenTp, token, spMatA, dnB, dnC, /*computeType=*/dnCType, buffer);
  token = spmmComp.getAsyncToken();

  // Copy data back to host and free all the resources.
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnB)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnC)
              .getAsyncToken();
  token = genDeallocMemRef(rewriter, loc, rowA, token);
  if (colA)
    token = genDeallocMemRef(rewriter, loc, colA, token);
  token = genDeallocMemRef(rewriter, loc, valA, token);
  token = genDeallocMemRef(rewriter, loc, buffer, token);
  if (!isZeroCopy)
    token = genDeallocMemRef(rewriter, loc, matB, token);
  token = genCopyMemRef(rewriter, loc, bufC, matC, token);
  token = genDeallocMemRef(rewriter, loc, matC, token);
  tokens.push_back(token);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    genHostUnregisterMemref(rewriter, loc, castR);
    if (memC)
      genHostUnregisterMemref(rewriter, loc, castC);
    genHostUnregisterMemref(rewriter, loc, castV);
    genHostUnregisterMemref(rewriter, loc, castB);
    genHostUnregisterMemref(rewriter, loc, castBufC);
  }

  // Done.
  rewriter.replaceOpWithNewOp<bufferization::ToTensorOp>(op, bufC);
  return success();
}

/// Match and rewrite SpGEMM kernel.
static LogicalResult
rewriteSpGEMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
              GPUDataTransferStrategy gpuDataTransferStrategy) {
  Location loc = op.getLoc();
  Value a = op.getOperand(0);
  Value b = op.getOperand(1);
  Value c = op.getOperand(2); // we have C = AB
  SmallVector<Value> tokens;

  // Only CSR <- CSR x CSR supported.
  bool isCOO = false;
  SparseTensorType aTp = getSparseTensorType(a);
  SparseTensorType bTp = getSparseTensorType(b);
  SparseTensorType cTp = getSparseTensorType(c);
  if (!isAdmissibleCSR(aTp) || !isAdmissibleCSR(bTp) || !isAdmissibleCSR(cTp))
    return failure();

  // Start sparse kernel and copy data from host to device.
  //   a : amemR/amemC/amemV -> rowA,colA,valA
  //   b : bmemR/bmemC/bmemV -> rowB,colB,valB
  //   c : materializes
  auto dnCType = cTp.getElementType();
  Value nseA = rewriter.create<NumberOfEntriesOp>(loc, a);
  Value nseB = rewriter.create<NumberOfEntriesOp>(loc, b);
  Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
  Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
  Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
  Value amemR = genFirstPosOrCrds(rewriter, loc, a, isCOO, enableRT);
  Value amemC = genSecondCrds(rewriter, loc, a, isCOO, enableRT);
  Value amemV = genToValues(rewriter, loc, a);
  Value bmemR = genFirstPosOrCrds(rewriter, loc, b, isCOO, enableRT);
  Value bmemC = genSecondCrds(rewriter, loc, b, isCOO, enableRT);
  Value bmemV = genToValues(rewriter, loc, b);
  Value rowA = genAllocCopy(rewriter, loc, amemR, tokens);
  Value colA = genAllocCopy(rewriter, loc, amemC, tokens);
  Value valA = genAllocCopy(rewriter, loc, amemV, tokens);
  Value rowB = genAllocCopy(rewriter, loc, bmemR, tokens);
  Value colB = genAllocCopy(rewriter, loc, bmemC, tokens);
  Value valB = genAllocCopy(rewriter, loc, bmemV, tokens);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();

  // Create sparse environment and sparse matrix handles.
  Type indexTp = rewriter.getIndexType();
  Type spmatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
  Type descTp = rewriter.getType<gpu::SparseSpGEMMOpHandleType>();
  Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
  Value token = genFirstWait(rewriter, loc);
  Operation *spGenA =
      genSpMat(rewriter, loc, spmatHandleTp, tokenTp, token, szm, szk, nseA,
               rowA, colA, valA, isCOO, enableRT);
  Value spMatA = spGenA->getResult(0);
  token = spGenA->getResult(1);
  Operation *spGenB =
      genSpMat(rewriter, loc, spmatHandleTp, tokenTp, token, szk, szn, nseB,
               rowB, colB, valB, isCOO, enableRT);
  Value spMatB = spGenB->getResult(0);
  token = spGenB->getResult(1);

  // Sparse matrix C materializes (also assumes beta == 0).
  Value zero = constantIndex(rewriter, loc, 0);
  Value one = constantIndex(rewriter, loc, 1);
  Value mplus1 = rewriter.create<arith::AddIOp>(loc, szm, one);
  auto e1 = genAllocBuffer(rewriter, loc, cTp.getPosType(), mplus1, token);
  Value rowC = e1.getResult(0);
  token = e1.getAsyncToken();
  auto e2 = genAllocBuffer(rewriter, loc, cTp.getCrdType(), zero, token);
  Value colC = e2.getResult(0);
  token = e2.getAsyncToken();
  auto e3 = genAllocBuffer(rewriter, loc, dnCType, zero, token);
  Value valC = e3.getResult(0);
  token = e3.getAsyncToken();
  Operation *spGenC =
      genSpMat(rewriter, loc, spmatHandleTp, tokenTp, token, szm, szn, zero,
               rowC, colC, valC, isCOO, enableRT);
  Value spMatC = spGenC->getResult(0);
  token = spGenC->getResult(1);

  // Precompute buffer sizes for SpGEMM.
  Operation *descOp =
      rewriter.create<gpu::SpGEMMCreateDescrOp>(loc, descTp, tokenTp, token);
  Value desc = descOp->getResult(0);
  token = descOp->getResult(1);
  Operation *work1 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType, zero,
      valC, gpu::SpGEMMWorkEstimationOrComputeKind::WORK_ESTIMATION);
  Value bufferSz1 = work1->getResult(0);
  token = work1->getResult(1);
  auto buf1 = genAllocBuffer(rewriter, loc, bufferSz1, token);
  Value buffer1 = buf1.getResult(0);
  token = buf1.getAsyncToken();
  Operation *work2 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType,
      bufferSz1, buffer1,
      gpu::SpGEMMWorkEstimationOrComputeKind::WORK_ESTIMATION);
  token = work2->getResult(1);

  // Compute step.
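  // (This mirrors the two-phase cuSPARSE SpGEMM API: the work-estimation
  // phase above and the compute phase below are each invoked twice, first to
  // query the required temporary buffer size and then again with the
  // allocated buffer.)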
  Operation *compute1 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType, zero,
      valC, gpu::SpGEMMWorkEstimationOrComputeKind::COMPUTE);
  Value bufferSz2 = compute1->getResult(0);
  token = compute1->getResult(1);
  auto buf2 = genAllocBuffer(rewriter, loc, bufferSz2, token);
  Value buffer2 = buf2.getResult(0);
  token = buf2.getAsyncToken();
  Operation *compute2 = rewriter.create<gpu::SpGEMMWorkEstimationOrComputeOp>(
      loc, indexTp, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType,
      bufferSz2, buffer2, gpu::SpGEMMWorkEstimationOrComputeKind::COMPUTE);
  token = compute2->getResult(1);

  // Get sizes.
  Operation *sizes = rewriter.create<gpu::SpGEMMGetSizeOp>(
      loc, indexTp, indexTp, indexTp, tokenTp, token, spMatC);
  Value nnz = sizes->getResult(2);
  token = sizes->getResult(3);
  auto a2 = genAllocBuffer(rewriter, loc, cTp.getCrdType(), nnz, token);
  colC = a2.getResult(0);
  token = a2.getAsyncToken();
  auto a3 = genAllocBuffer(rewriter, loc, dnCType, nnz, token);
  valC = a3.getResult(0);
  token = a3.getAsyncToken();

  // Update C with new pointers and copy final product back into C.
  Operation *update = rewriter.create<gpu::SetCsrPointersOp>(
      loc, tokenTp, token, spMatC, rowC, colC, valC);
  token = update->getResult(0);
  Operation *copy = rewriter.create<gpu::SpGEMMCopyOp>(
      loc, tokenTp, token, desc, gpu::TransposeMode::NON_TRANSPOSE,
      gpu::TransposeMode::NON_TRANSPOSE, spMatA, spMatB, spMatC, dnCType);
  token = copy->getResult(0);

  // Allocate buffers on host.
  Value rowH = genHostBuffer(rewriter, loc, cTp.getPosType(), mplus1);
  Value colH = genHostBuffer(rewriter, loc, cTp.getCrdType(), nnz);
  Value valH = genHostBuffer(rewriter, loc, dnCType, nnz);

  // Copy data back to host and free all the resources.
  token = rewriter.create<gpu::SpGEMMDestroyDescrOp>(loc, tokenTp, token, desc)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatB)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatC)
              .getAsyncToken();
  token = genCopyMemRef(rewriter, loc, rowH, rowC, token);
  token = genCopyMemRef(rewriter, loc, colH, colC, token);
  token = genCopyMemRef(rewriter, loc, valH, valC, token);
  tokens.push_back(token);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();

  // Done.
  Value vt = rewriter.create<bufferization::ToTensorOp>(loc, valH);
  Value rt = rewriter.create<bufferization::ToTensorOp>(loc, rowH);
  Value ct = rewriter.create<bufferization::ToTensorOp>(loc, colH);
  rewriter.replaceOpWithNewOp<PackOp>(op, c.getType(), vt, ValueRange{rt, ct});
  return success();
}

/// Match and rewrite 2:4 SpMM kernel.
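/// In the 2:4 ("structured") sparsity scheme supported by NVIDIA sparse
/// tensor cores, at most two out of every four consecutive elements of A are
/// nonzero; A is pruned (and checked) accordingly before the multiplication.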
static LogicalResult
rewrite2To4SpMM(PatternRewriter &rewriter, linalg::GenericOp op,
                GPUDataTransferStrategy gpuDataTransferStrategy) {
  Location loc = op.getLoc();
  Value A = op.getOperand(0);
  Value B = op.getOperand(1);
  Value C = op.getOperand(2); // we have C = AB
  SmallVector<Value> tokens;

  bool isZeroCopy =
      gpuDataTransferStrategy == GPUDataTransferStrategy::kZeroCopy;

  // All inputs should be dense tensors.
  if (!isDenseTensor(A) || !isDenseTensor(B) || !isDenseTensor(C))
    return failure();

  Value matA, matB;
  Value bufA = genTensorToMemref(rewriter, loc, A);
  if (!isZeroCopy)
    matA = genAllocCopy(rewriter, loc, bufA, tokens);
  Value bufB = genTensorToMemref(rewriter, loc, B);
  if (!isZeroCopy)
    matB = genAllocCopy(rewriter, loc, bufB, tokens);
  Value bufC = genTensorToMemref(rewriter, loc, C);
  Value castA, castB, castC;
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    castA = genHostRegisterMemref(rewriter, loc, bufA);
    castB = genHostRegisterMemref(rewriter, loc, bufB);
    castC = genHostRegisterMemref(rewriter, loc, bufC);
  }
  if (isZeroCopy) {
    matA = bufA;
    matB = bufB;
  }
  Value matC = genAllocCopy(rewriter, loc, bufC, tokens);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();

  // Create sparse environment and sparse matrix/dense matrix handles.
  Value szm = linalg::createOrFoldDimOp(rewriter, loc, matA, 0);
  Value szk = linalg::createOrFoldDimOp(rewriter, loc, matB, 0);
  Value szn = linalg::createOrFoldDimOp(rewriter, loc, matC, 1);
  Type indexTp = rewriter.getIndexType();
  Type dnTensorHandleTp = rewriter.getType<gpu::SparseDnTensorHandleType>();
  Type spMatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
  Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
  Value token = genFirstWait(rewriter, loc);
  Operation *spGenA = rewriter.create<gpu::Create2To4SpMatOp>(
      loc, spMatHandleTp, tokenTp, token, szm, szk,
      gpu::Prune2To4SpMatFlag::PRUNE_AND_CHECK, matA);
  Value spMatA = spGenA->getResult(0);
  token = spGenA->getResult(1);
  auto dmatB = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnTensorHandleTp, tokenTp, token, matB,
      SmallVector<Value>{szk, szn});
  Value dnB = dmatB.getResult(0);
  token = dmatB.getAsyncToken();
  auto dmatC = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnTensorHandleTp, tokenTp, token, matC,
      SmallVector<Value>{szm, szn});
  Value dnC = dmatC.getResult(0);
  token = dmatC.getAsyncToken();
  auto dmatCType = llvm::cast<ShapedType>(matC.getType()).getElementType();

  // Precompute buffer sizes for SpMM.
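  // (Unlike the regular SpMM path, the 2:4 variant of the buffer-size query
  // below returns three sizes, so three workspace buffers are allocated.)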
  SmallVector<Type> bufferTypes_{indexTp, indexTp, indexTp};
  TypeRange bufferTypes(bufferTypes_);
  auto bufferComp = rewriter.create<gpu::SpMMBufferSizeOp>(
      loc, bufferTypes, tokenTp, token, gpu::TransposeMode::NON_TRANSPOSE,
      gpu::TransposeMode::NON_TRANSPOSE, spMatA, dnB, dnC,
      /*computeType=*/dmatCType);
  token = bufferComp.getAsyncToken();

  Value bufferSz = bufferComp.getResult(0);
  auto buf = genAllocBuffer(rewriter, loc, bufferSz, token);
  Value buffer = buf.getResult(0);
  token = buf.getAsyncToken();

  Value bufferSz2 = bufferComp.getResult(1);
  auto buf2 = genAllocBuffer(rewriter, loc, bufferSz2, token);
  Value buffer2 = buf2.getResult(0);
  token = buf2.getAsyncToken();

  Value bufferSz3 = bufferComp.getResult(2);
  auto buf3 = genAllocBuffer(rewriter, loc, bufferSz3, token);
  Value buffer3 = buf3.getResult(0);
  token = buf3.getAsyncToken();

  auto dnCType = llvm::cast<ShapedType>(matC.getType()).getElementType();

  // Perform the SpMM.
  auto spmmComp = rewriter.create<gpu::SpMMOp>(
      loc, tokenTp, token, spMatA, dnB, dnC, /*computeType=*/dnCType,
      SmallVector<Value>{buffer, buffer2, buffer3});
  token = spmmComp.getAsyncToken();

  // Copy data back to host and free all the resources.
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatA)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnB)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnC)
              .getAsyncToken();
  SmallVector<Value> newDynamicSizes;
  token = genDeallocMemRef(rewriter, loc, buffer, token);
  token = genDeallocMemRef(rewriter, loc, buffer2, token);
  token = genDeallocMemRef(rewriter, loc, buffer3, token);
  if (!isZeroCopy)
    token = genDeallocMemRef(rewriter, loc, matA, token);
  if (!isZeroCopy)
    token = genDeallocMemRef(rewriter, loc, matB, token);
  token = genCopyMemRef(rewriter, loc, bufC, matC, token);
  token = genDeallocMemRef(rewriter, loc, matC, token);
  tokens.push_back(token);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    genHostUnregisterMemref(rewriter, loc, castA);
    genHostUnregisterMemref(rewriter, loc, castB);
    genHostUnregisterMemref(rewriter, loc, castC);
  }

  // Done.
  rewriter.replaceOpWithNewOp<bufferization::ToTensorOp>(op, bufC);
  return success();
}

/// Match and rewrite SDDMM kernel.
static LogicalResult
rewriteSDDMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
             GPUDataTransferStrategy gpuDataTransferStrategy) {
  Location loc = op.getLoc();
  Value a = op.getOperand(0);
  Value b = op.getOperand(1);
  Value c = op.getOperand(2);
  SmallVector<Value> tokens;

  bool isZeroCopy =
      gpuDataTransferStrategy == GPUDataTransferStrategy::kZeroCopy;

  // Only admissible sparse matrix format and dense matrices, no COO.
  bool isCOO = false;
  SparseTensorType aTp = getSparseTensorType(a);
  SparseTensorType bTp = getSparseTensorType(b);
  SparseTensorType cTp = getSparseTensorType(c);
  if (!areAdmissibleTypes(cTp, bTp, aTp, enableRT, /*isMatVec=*/false, isCOO))
    return failure();
  if (isCOO)
    return failure();

  // The SDDMM operates on the sparse output in place.
  // Start sparse kernel and copy data from host to device.
  //   a : bufA -> matA
  //   b : bufB -> matB
  //   c : memR/memC/memV -> rowC,colC,valC
  Value nseC = rewriter.create<NumberOfEntriesOp>(loc, c);
  Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
  Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
  Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
  Value matA, matB;
  Value bufA = genTensorToMemref(rewriter, loc, a);
  if (!isZeroCopy)
    matA = genAllocCopy(rewriter, loc, bufA, tokens);
  Value bufB = genTensorToMemref(rewriter, loc, b);
  if (!isZeroCopy)
    matB = genAllocCopy(rewriter, loc, bufB, tokens);
  Value memR = genFirstPosOrCrds(rewriter, loc, c, isCOO, enableRT);
  Value memC = genSecondCrds(rewriter, loc, c, isCOO, enableRT);
  Value memV = genToValues(rewriter, loc, c);
  Value castB, castA, castR, castC, castV;
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    castB = genHostRegisterMemref(rewriter, loc, bufB);
    castA = genHostRegisterMemref(rewriter, loc, bufA);
    castR = genHostRegisterMemref(rewriter, loc, memR);
    if (memC)
      castC = genHostRegisterMemref(rewriter, loc, memC);
    castV = genHostRegisterMemref(rewriter, loc, memV);
  }
  if (isZeroCopy) {
    matA = bufA;
    matB = bufB;
  }
  Value rowC = genAllocCopy(rewriter, loc, memR, tokens);
  Value colC = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
  Value valC = genAllocCopy(rewriter, loc, memV, tokens);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();

  // Create sparse environment and sparse matrix/dense matrix handles.
  Type indexTp = rewriter.getIndexType();
  Type dnMatHandleTp = rewriter.getType<gpu::SparseDnTensorHandleType>();
  Type spMatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
  Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
  Value token = genFirstWait(rewriter, loc);
  auto dmatA = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnMatHandleTp, tokenTp, token, matA, SmallVector<Value>{szm, szk});
  Value dnA = dmatA.getResult(0);
  token = dmatA.getAsyncToken();
  auto dmatB = rewriter.create<gpu::CreateDnTensorOp>(
      loc, dnMatHandleTp, tokenTp, token, matB, SmallVector<Value>{szk, szn});
  Value dnB = dmatB.getResult(0);
  token = dmatB.getAsyncToken();

  Operation *spGenC =
      genSpMat(rewriter, loc, spMatHandleTp, tokenTp, token, szm, szn, nseC,
               rowC, colC, valC, isCOO, enableRT);
  Value spMatC = spGenC->getResult(0);
  token = spGenC->getResult(1);
  auto dnCType = llvm::cast<ShapedType>(c.getType()).getElementType();

  // Precompute buffer size for SDDMM.
  auto bufferComp = rewriter.create<gpu::SDDMMBufferSizeOp>(
      loc, indexTp, tokenTp, token, dnA, dnB, spMatC, dnCType);
  Value bufferSz = bufferComp.getResult(0);
  token = bufferComp.getAsyncToken();
  auto buf = genAllocBuffer(rewriter, loc, bufferSz, token);
  Value buffer = buf.getResult(0);
  token = buf.getAsyncToken();

  // Perform the SDDMM.
  auto sddmmComp = rewriter.create<gpu::SDDMMOp>(loc, tokenTp, token, dnA, dnB,
                                                 spMatC, dnCType, buffer);
  token = sddmmComp.getAsyncToken();

  // Copy data back to host and free all the resources.
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnA)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroyDnTensorOp>(loc, tokenTp, token, dnB)
              .getAsyncToken();
  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatC)
              .getAsyncToken();
  token = genDeallocMemRef(rewriter, loc, buffer, token);
  if (!isZeroCopy) {
    token = genDeallocMemRef(rewriter, loc, matA, token);
    token = genDeallocMemRef(rewriter, loc, matB, token);
  }
  token = genDeallocMemRef(rewriter, loc, rowC, token);
  if (colC)
    token = genDeallocMemRef(rewriter, loc, colC, token);
  token = genCopyMemRef(rewriter, loc, memV, valC, token);
  token = genDeallocMemRef(rewriter, loc, valC, token);
  tokens.push_back(token);
  genBlockingWait(rewriter, loc, tokens);
  tokens.clear();
  if (gpuDataTransferStrategy != GPUDataTransferStrategy::kRegularDMA) {
    genHostUnregisterMemref(rewriter, loc, castB);
    genHostUnregisterMemref(rewriter, loc, castA);
    genHostUnregisterMemref(rewriter, loc, castR);
    if (memC)
      genHostUnregisterMemref(rewriter, loc, castC);
    genHostUnregisterMemref(rewriter, loc, castV);
  }

  // Done.
  rewriter.replaceOpWithNewOp<sparse_tensor::LoadOp>(op, c);
  return success();
}

//===----------------------------------------------------------------------===//
// Rewriting rules for direct code generation.
//===----------------------------------------------------------------------===//

/// Proof-of-concept rewriter. This rule generates a GPU implementation
/// for each outermost forall loop generated by the sparse compiler.
/// TODO: right now this works with parallelization-strategy=dense-outer-loop;
/// give this its own flag in the future.
struct ForallRewriter : public OpRewritePattern<scf::ParallelOp> {
  using OpRewritePattern<scf::ParallelOp>::OpRewritePattern;

  ForallRewriter(MLIRContext *context, unsigned nT)
      : OpRewritePattern(context), numThreads(nT) {}

  LogicalResult matchAndRewrite(scf::ParallelOp forallOp,
                                PatternRewriter &rewriter) const override {
    // Reject inadmissible loop form.
    // Essentially we only accept a loop, generated by the sparse compiler,
    // of the form
    //   forall (i = 0; i < N; i++)
    // so that cyclic scheduling over the threads is easy.
    if (!forallOp->hasAttr(LoopEmitter::getLoopEmitterLoopAttrName()) ||
        forallOp.getNumReductions() != 0 || forallOp.getNumLoops() != 1 ||
        !matchPattern(forallOp.getLowerBound()[0], m_Zero()) ||
        !matchPattern(forallOp.getStep()[0], m_One()))
      return failure();
    // Collect every value that is computed outside the parallel loop.
    SetVector<Value> invariants; // stable iteration!
    forallOp->walk([&](Operation *op) {
      // Collect all values of admissible ops.
      for (OpOperand &o : op->getOpOperands()) {
        Value val = o.get();
        Block *block;
        if (auto arg = dyn_cast<BlockArgument>(val))
          block = arg.getOwner();
        else
          block = val.getDefiningOp()->getBlock();
        if (!isNestedIn(block, forallOp))
          invariants.insert(val);
      }
    });
    // Outline the outside values as proper parameters. Fail when sharing
    // a value between host and device is not straightforward.
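    // (Constants are re-materialized inside the kernel, integer/float scalars
    // are passed by value, and memrefs are passed as device buffers; any
    // other invariant type aborts the rewrite.)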
    SmallVector<Value> constants;
    SmallVector<Value> scalars;
    SmallVector<Value> buffers;
    for (Value val : invariants) {
      Type tp = val.getType();
      if (val.getDefiningOp<arith::ConstantOp>())
        constants.push_back(val);
      else if (isa<FloatType>(tp) || tp.isIntOrIndex())
        scalars.push_back(val);
      else if (isa<MemRefType>(tp))
        buffers.push_back(val);
      else
        return failure(); // don't know how to share
    }
    // Pass outlined non-constant values.
    // TODO: Experiment with `useHostRegistrationForOut` to see if we want to
    // keep the feature at all (either through a heuristic or a compiler
    // option for gpu codegen).
    Location loc = forallOp->getLoc();
    SmallVector<Value> args;
    SmallVector<Value> tokens;
    Value out = genParametersIn(rewriter, loc, scalars, buffers, args, tokens,
                                /*useHostRegistrationForOut=*/false);
    // Set up GPU module and construct GPU function.
    auto saveIp = rewriter.saveInsertionPoint();
    ModuleOp topModule = forallOp->getParentOfType<ModuleOp>();
    auto gpuModule = genGPUModule(rewriter, topModule);
    auto gpuFunc = genGPUFunc(rewriter, gpuModule, args);
    genGPUCode(rewriter, gpuFunc, forallOp, constants, scalars, buffers);
    // Generate code that launches the kernel asynchronously, blocking on all
    // open tokens and yielding a new token for the output.
    // TODO: Passing in tokens to the launch op does not seem to be properly
    // lowered by cubin yet, hence the current blocking wait.
    rewriter.restoreInsertionPoint(saveIp);
    genBlockingWait(rewriter, loc, tokens);
    tokens.clear();
    Value kernelToken =
        genLaunchGPUFunc(rewriter, gpuFunc, args, tokens, numThreads);
    // Finalize the outlined arguments.
    genParametersOut(rewriter, loc, out, kernelToken, scalars, buffers, args,
                     tokens);
    genBlockingWait(rewriter, loc, tokens);
    rewriter.eraseOp(forallOp);
    return success();
  }

private:
  // Helper method to see if a block appears in the given loop.
  static bool isNestedIn(Block *block, scf::ParallelOp forallOp) {
    for (Operation *o = block->getParentOp(); o; o = o->getParentOp()) {
      if (o == forallOp)
        return true;
    }
    return false;
  }

  unsigned numThreads;
};

//===----------------------------------------------------------------------===//
// Rewriting rules for library recognition and code generation.
//===----------------------------------------------------------------------===//

/// Proof-of-concept rewriter. This rule recognizes certain math kernels
/// and replaces these with corresponding calls into a sparse library.
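///
/// For example, an SpMV y = A * x is recognized when it appears as a
/// linalg.generic along the lines of (illustrative sketch only; #CSR stands
/// for some admissible sparse encoding on %A):
///
///   %0 = linalg.generic {
///          indexing_maps = [affine_map<(i,j) -> (i,j)>,
///                           affine_map<(i,j) -> (j)>,
///                           affine_map<(i,j) -> (i)>],
///          iterator_types = ["parallel", "reduction"]}
///        ins(%A, %x : tensor<?x?xf64, #CSR>, tensor<?xf64>)
///        outs(%y : tensor<?xf64>) {
///        ^bb0(%a: f64, %b: f64, %acc: f64):
///          %m = arith.mulf %a, %b : f64
///          %s = arith.addf %acc, %m : f64
///          linalg.yield %s : f64
///        } -> tensor<?xf64>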
struct LinalgOpRewriter : public OpRewritePattern<linalg::GenericOp> {
  using OpRewritePattern<linalg::GenericOp>::OpRewritePattern;

  LinalgOpRewriter(MLIRContext *context, bool rt, GPUDataTransferStrategy t)
      : OpRewritePattern(context), enableRT(rt), gpuDataTransferStrategy(t) {}

  LogicalResult matchAndRewrite(linalg::GenericOp op,
                                PatternRewriter &rewriter) const override {
    if (op.getNumDpsInits() != 1)
      return failure(); // reject multi-output

    const unsigned numLoops = op.getNumLoops();
    const unsigned numTensors = op->getNumOperands();
    const auto iteratorTypes = op.getIteratorTypesArray();
    SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();

    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr i, j, k;
    bindDims(getContext(), i, j, k);

    // TODO: more robust patterns, transposed versions, more kernels,
    // identify alpha and beta and pass them to the CUDA calls.

    // Recognize a SpMV kernel.
    if (numLoops == 2 && numTensors == 3 &&
        linalg::isParallelIterator(iteratorTypes[0]) &&
        linalg::isReductionIterator(iteratorTypes[1]) &&
        maps == infer({{i, j}, {j}, {i}}) && matchSumOfMultOfArgs(op)) {
      return rewriteSpMV(rewriter, op, enableRT, gpuDataTransferStrategy);
    }

    // Recognize a SpGEMM, 2:4-SpMM, or SpMM kernel.
    if (numLoops == 3 && numTensors == 3 &&
        linalg::isParallelIterator(iteratorTypes[0]) &&
        linalg::isParallelIterator(iteratorTypes[1]) &&
        linalg::isReductionIterator(iteratorTypes[2]) &&
        maps == infer({{i, k}, {k, j}, {i, j}}) && matchSumOfMultOfArgs(op)) {
      if (!isDenseTensor(op.getOperand(0)) && !isDenseTensor(op.getOperand(1)))
        return rewriteSpGEMM(rewriter, op, enableRT, gpuDataTransferStrategy);
      if (op->getAttr("DENSE24"))
        return rewrite2To4SpMM(rewriter, op, gpuDataTransferStrategy);
      return rewriteSpMM(rewriter, op, enableRT, gpuDataTransferStrategy);
    }

    // Recognize a SDDMM kernel.
    if (numLoops == 3 && numTensors == 3 &&
        linalg::isParallelIterator(iteratorTypes[0]) &&
        linalg::isParallelIterator(iteratorTypes[1]) &&
        linalg::isReductionIterator(iteratorTypes[2]) &&
        maps == infer({{i, k}, {k, j}, {i, j}}) &&
        matchSumReductionOfMulUnary(op)) {
      return rewriteSDDMM(rewriter, op, enableRT, gpuDataTransferStrategy);
    }

    return failure();
  }

private:
  bool enableRT;
  GPUDataTransferStrategy gpuDataTransferStrategy;
};

} // namespace

//===----------------------------------------------------------------------===//
// Public methods for populating GPU rewriting rules.
//
// Currently two sets of rewriting rules are made available. The first set
// implements direct code generation, currently by means of converting the
// outermost parallel loop into GPU threads. The second set implements
// library recognition of a set of sparse operations. Eventually, the right
// combination of these two approaches has to be found.
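//
// Typical usage from a pass (illustrative sketch only; the surrounding pass
// boilerplate is assumed):
//
//   RewritePatternSet patterns(ctx);
//   populateSparseGPUCodegenPatterns(patterns, /*numThreads=*/1024);
//   (void)applyPatternsAndFoldGreedily(module, std::move(patterns));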
//===----------------------------------------------------------------------===//

void mlir::populateSparseGPUCodegenPatterns(RewritePatternSet &patterns,
                                            unsigned numThreads) {
  patterns.add<ForallRewriter>(patterns.getContext(), numThreads);
}

void mlir::populateSparseGPULibgenPatterns(
    RewritePatternSet &patterns, bool enableRT,
    GPUDataTransferStrategy gpuDataTransfer) {
  patterns.add<LinalgOpRewriter>(patterns.getContext(), enableRT,
                                 gpuDataTransfer);
}