/llvm-project/mlir/include/mlir/ExecutionEngine/SparseTensor/
Storage.h
  206  positions(lvlRank), coordinates(lvlRank), lvlCursor(lvlRank) {}   in SparseTensorStorage()
  258  *out = &coordinates[lvl];   in getCoordinates()
  279  assert(i < coordinates[l].size());   in getCoordinatesBuffer()
  280  crdBuffer.push_back(coordinates[l][i]);   in getCoordinatesBuffer()
  360  assert(nnz == coordinates[l].size());   in sortInPlace()
  373  lvlCrds[l] = coordinates[l][i];   in sortInPlace()
  380  coordinates[l][current] = coordinates[l][next];   in sortInPlace()
  386  coordinates[l][current] = lvlCrds[l];   in sortInPlace()
  400  if (coordinates[l][lhs] == coordinates[l][rhs])   in sortInPlace()
  402  return coordinates[l][lhs] < coordinates[l][rhs];   in sortInPlace()
  [all …]
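The sortInPlace() matches above compare two stored elements coordinate by coordinate, level by level. A minimal sketch of that lexicographic comparison pattern (hypothetical free function, not the actual Storage.h implementation):

```cpp
#include <cstdint>
#include <vector>

// Lexicographic "element lhs sorts before element rhs" over per-level
// coordinate vectors, mirroring the comparator lines matched above.
// coordinates[l][i] is the level-l coordinate of element i.
bool lexLess(const std::vector<std::vector<uint64_t>> &coordinates,
             uint64_t lvlRank, uint64_t lhs, uint64_t rhs) {
  for (uint64_t l = 0; l < lvlRank; ++l) {
    if (coordinates[l][lhs] == coordinates[l][rhs])
      continue; // tie at this level, decide at a deeper level
    return coordinates[l][lhs] < coordinates[l][rhs];
  }
  return false; // all levels equal
}
```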
COO.h
   85  coordinates.reserve(capacity * dimRank);
  103  const uint64_t *base = coordinates.data();   in add()
  104  const uint64_t size = coordinates.size();   in add()
  110  coordinates.push_back(dimCoords[d]);   in add()
  117  const uint64_t *const newBase = coordinates.data();   in add()
  143  std::vector<uint64_t> coordinates; // shared coordinate pool   variable
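The add() matches above append one element's dimension coordinates into a single flat pool that was reserved up front (capacity * dimRank); the base/newBase lines suggest that appending can reallocate the pool and invalidate raw pointers into it. A minimal sketch of that idea, with assumed names and no claim to match the real COO.h class:

```cpp
#include <cstdint>
#include <vector>

// Toy COO buffer with a shared, flat coordinate pool (AoS layout):
// element e occupies coordinates[e * dimRank .. e * dimRank + dimRank).
struct ToyCOO {
  ToyCOO(uint64_t dimRank, uint64_t capacity) : dimRank(dimRank) {
    coordinates.reserve(capacity * dimRank); // one reservation for all elements
    values.reserve(capacity);
  }

  void add(const uint64_t *dimCoords, double value) {
    // push_back may reallocate the pool, so any raw pointer taken from
    // coordinates.data() before this loop must be re-derived afterwards
    // (the base/newBase comparison in the matches above serves that purpose).
    for (uint64_t d = 0; d < dimRank; ++d)
      coordinates.push_back(dimCoords[d]);
    values.push_back(value);
  }

  uint64_t dimRank;
  std::vector<uint64_t> coordinates; // shared coordinate pool
  std::vector<double> values;
};
```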
/llvm-project/mlir/test/Dialect/SparseTensor/
sparse_concat.mlir
   19  // CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} : tens…
   21  // CHECK-DAG: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} : tens…
   39  // CHECK-DAG: %[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tens…
   41  // CHECK-DAG: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : ten…
   60  // CHECK-DAG: %[[TMP_16:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 0 : index} : ten…
   62  // CHECK-DAG: %[[TMP_18:.*]] = sparse_tensor.coordinates %[[TMP_arg2]] {level = 1 : index} : ten…
  105  // CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index} : tens…
  107  // CHECK-DAG: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index} : tens…
  125  // CHECK-DAG: %[[TMP_9:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 0 : index} : tens…
  127  // CHECK-DAG: %[[TMP_11:.*]] = sparse_tensor.coordinates %[[TMP_arg1]] {level = 1 : index} : ten…
  [all …]
sparse_kernels.mlir
   17  // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xindex>
   19  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<10x20xf32, #sparse{{[0-9]*}}> to memref<?xindex>
   63  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index}
   65  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index}
  113  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<4x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  115  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<4x8xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  118  // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<8x4xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  120  // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<8x4xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  208  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<3x3xi32, #sparse{{[0-9]*}}> to memref<?xindex>
  210  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates
  [all...]
invalid.mlir
  13  func.func @non_static_pack_ret(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
  16  %0 = sparse_tensor.assemble (%pos, %coordinates), %values
  25  func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>)
  28  %0 = sparse_tensor.assemble (%pos, %coordinates), %values
  37  func.func @invalid_pack_type(%values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x3xi32>)
  40  %0 = sparse_tensor.assemble (%pos, %coordinates), %values
  49  func.func @invalid_pack_mis_position(%values: tensor<6xf64>, %coordinates: tensor<6xi32>)
  52  %0 = sparse_tensor.assemble (%coordinates), %values
  61  func.func @invalid_unpack_type(%sp: tensor<100xf32, #SparseVector>, %values: tensor<6xf64>, %pos: tensor<2xi32>, %coordinates: tensor<6x1xi32>) {
  64  out_lvls(%pos, %coordinates
  [all...]
sparse_reshape.mlir
   25  // CHECK-DAG: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
   62  // CHECK-DAG: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
   64  // CHECK-DAG: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
  110  // CHECK-DAG: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
  153  // CHECK-DAG: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
  155  // CHECK-DAG: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
external_direct.mlir
  26  // CHECK: %[[C:.*]] = sparse_tensor.coordinates %[[F]]
  43  // CHECK: %[[C:.*]] = sparse_tensor.coordinates %[[F]]#1
sparse_fp_ops.mlir
   39  // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
   71  // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  103  // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  135  // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  170  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
  230  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  290  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
  326  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}>
  359  // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  407  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates
  [all...]
sorted_coo.mlir
   47  // C_HECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xf32, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
  101  // C_HECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
  102  // C_HECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
  166  // C_HECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
  167  // C_HECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
  170  // C_HECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
  171  // C_HECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse{{[0-9]*}}> to memref<?xindex, strided<[?], offset: ?>>
sparse_tensor_reshape.mlir
  14  // CHECK-DAG: %[[I0:.*]] = sparse_tensor.coordinates %[[S]] {level = 0 : index}
  16  // CHECK-DAG: %[[I1:.*]] = sparse_tensor.coordinates %[[S]] {level = 1 : index}
sparse_broadcast.mlir
  21  // CHECK-DAG: %[[TMP_2:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 0 : index}
  23  // CHECK-DAG: %[[TMP_4:.*]] = sparse_tensor.coordinates %[[TMP_arg0]] {level = 1 : index}
sparse_out.mlir
  107  // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tens…
  160  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tens…
  162  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : ten…
  164  // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : ten…
  167  // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : ten…
  169  // CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : ten…
  171  // CHECK-DAG: %[[VAL_20:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 2 : index} : ten…
  323  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : ten…
  325  // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : ten…
  328  // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : ten…
  [all …]
sparse_int_ops.mlir
   34  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
   95  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
  155  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
  191  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
  225  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}>
  259  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
  297  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
  357  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
  415  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi64, #sparse{{[0-9]*}}> to memref<?xindex>
  449  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates
  [all...]
sparse_outbuf.mlir
  20  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref<?xindex>
  54  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xi32, #{{.*}}> to memref<?xindex>
  87  // CHECK-DAG: %[[VAL_5:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<10xf32, #{{.*}}> to memref<?xindex>
sparse_affine.mlir
   26  // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xf32, #sparse{{[0-9]*}}>
  113  // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32xi32, #sparse{{[0-9]*}}>
  164  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf64, #sparse{{[0-9]*}}>
  221  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  224  // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  285  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<34x16xf64, #sparse{{[0-9]*}}> to memref<?xindex>
  288  // CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x19xf64, #sparse{{[0-9]*}}> to memref<?xindex>
sparse_2d.mlir
  138  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  203  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  266  // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  307  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  377  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  445  // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  487  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  489  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  583  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  585  // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates
  [all...]
sparse_transpose.mlir
  26  // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[DEMAP]] {level = 0 : index} : tens…
  28  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[DEMAP]] {level = 1 : index} : tens…
sparse_foreach.mlir
   46  // C_HECK-DAG: %[[VAL_4:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : ten…
   51  // C_HECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : ten…
  106  // C_HECK-DAG: %[[VAL_6:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : ten…
  108  // C_HECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : ten…
sparse_3d.mlir
  121  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  188  // CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  235  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  306  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  353  // CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  355  // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  449  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  451  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 2 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  500  // CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x16x8xf32, #sparse{{[0-9]*}}> to memref<?xindex>
  576  // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates
  [all...]
/llvm-project/mlir/include/mlir/Dialect/SparseTensor/IR/ |
SparseTensorAttrDefs.td
  149  level-expressions collectively define an affine map from dimension-coordinates to
  150  level-coordinates. The dimension-expressions collectively define the inverse map,
  156  as **coordinates** and offsets into the storage format as **positions**.
  164  - **singleton** : a variant of the compressed format, where coordinates have no siblings
  176  coordinates at that level) and ordered (coordinates appear sorted at that
  177  level). For singleton levels, the coordinates are fused with its parents in AoS
  181  - **nonunique** : duplicate coordinates may appear at the level
  182  - **nonordered** : coordinates may appear in arbitrary order
  195  - The required bitwidth for coordinate storage (the coordinates
  [all...]
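The excerpt above distinguishes the runtime coordinates of a stored entry from the positions (offsets) into the storage format. A small worked example of that terminology for a 4x4 matrix stored with a dense level followed by a compressed level (CSR); the arrays are illustrative and hand-written, not produced by MLIR:

```cpp
#include <cstdint>
#include <vector>

// CSR storage of
//     | 1 0 0 2 |
//     | 0 0 3 0 |
//     | 0 0 0 0 |
//     | 4 5 0 0 |
// "positions" are offsets into the compressed level's storage;
// "coordinates" are the column indices of the stored entries.
const std::vector<uint64_t> positions   = {0, 2, 3, 3, 5}; // row i spans [positions[i], positions[i+1])
const std::vector<uint64_t> coordinates = {0, 3, 2, 0, 1}; // column coordinate of each stored value
const std::vector<double>   values      = {1, 2, 3, 4, 5};
```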
SparseTensorBase.td
  23  schemes consisting of positions, coordinates, and values. Lower-level
  47  iteration graph, reflecting the required order on coordinates with
  52  consists of a conjunction of tensor coordinates together with a tensor
  55  the way coordinates are exhausted. As such these iteration
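The dialect-overview excerpt describes sparse storage as positions, coordinates, and values whose coordinates are exhausted by iteration. A minimal sketch of walking such a (dense, compressed) storage scheme, reusing the assumed CSR arrays from the previous example; this shows the access pattern only, not MLIR-generated code:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// The positions array of the compressed level delimits, per row, a segment
// of the coordinates/values arrays that holds that row's stored entries.
void walkCSR(const std::vector<uint64_t> &positions,
             const std::vector<uint64_t> &coordinates,
             const std::vector<double> &values, uint64_t numRows) {
  for (uint64_t i = 0; i < numRows; ++i)
    for (uint64_t p = positions[i]; p < positions[i + 1]; ++p)
      std::printf("A[%llu][%llu] = %f\n", (unsigned long long)i,
                  (unsigned long long)coordinates[p], values[p]);
}
```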
/llvm-project/mlir/test/Integration/GPU/CUDA/sm90/python/tools/ |
matmulBuilder.py
   434  coordinates=[coord, dimX],
   442  coordinates=[dimY, coord],
   451  coordinates=[dimY2, coord],
   885  coordinates=[coord, dimX],
   893  coordinates=[dimY, coord],
   901  coordinates=[dimY2, coord],
  1065  coordinates=[coord, dimX],
  1073  coordinates=[dimY, coord],
  1082  coordinates=[dimY2, coord],
/llvm-project/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/ |
sparse-matvec-lib.mlir
  39  // Compute matrix vector y = Ax on COO with default index coordinates.
  47  // Compute matrix vector y = Ax on CSR with 32-bit positions and coordinates.
  55  // Compute matrix vector y = Ax on CSC with 64-bit positions and coordinates.
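The comments above describe y = Ax kernels over COO, CSR, and CSC storage. For reference, a plain C++ sketch of the CSR case written directly against positions/coordinates/values arrays; this is illustrative only and not the MLIR/CUDA library path those tests exercise:

```cpp
#include <cstdint>
#include <vector>

// y = A * x for a CSR matrix given as positions/coordinates/values.
std::vector<double> spmvCSR(const std::vector<uint64_t> &positions,
                            const std::vector<uint64_t> &coordinates,
                            const std::vector<double> &values,
                            const std::vector<double> &x, uint64_t numRows) {
  std::vector<double> y(numRows, 0.0);
  for (uint64_t i = 0; i < numRows; ++i)
    for (uint64_t p = positions[i]; p < positions[i + 1]; ++p)
      y[i] += values[p] * x[coordinates[p]]; // coordinates[p] is the column index
  return y;
}
```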
/llvm-project/llvm/docs/AMDGPU/ |
gfx7_vaddr_887f26.rst
  13  Image address which includes from one to four dimensional coordinates and other data used to locate…
gfx8_vaddr_887f26.rst
  13  Image address which includes from one to four dimensional coordinates and other data used to locate…