/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/
legalizer-info-validation.mir
  17 # DEBUG: G_ADD (opcode [[ADD_OPC:[0-9]+]]): 1 type index, 0 imm indices
  21 # DEBUG-NEXT: G_SUB (opcode [[SUB_OPC:[0-9]+]]): 1 type index, 0 imm indices
  26 # DEBUG-NEXT: G_MUL (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  30 # DEBUG-NEXT: G_SDIV (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  34 # DEBUG-NEXT: G_UDIV (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  39 # DEBUG-NEXT: G_SREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  43 # DEBUG-NEXT: G_UREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  48 # DEBUG-NEXT: G_SDIVREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  53 # DEBUG-NEXT: G_UDIVREM (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  58 # DEBUG-NEXT: G_AND (opcode {{[0-9]+}}): 1 type index, 0 imm indices
  [all...]
/llvm-project/clang/test/CodeGen/aarch64-sve-intrinsics/ |
acle_sve_tbl.c
|
acle_sve_adrh.c
|
acle_sve_adrw.c
|
acle_sve_adrd.c
|
/llvm-project/clang/test/CodeGen/aarch64-sve2-intrinsics/ |
acle_sve2_tbx.c
|
acle_sve2_tbl2.c
|
/llvm-project/llvm/test/Transforms/VectorCombine/X86/ |
scalarize-vector-gep.ll
  11 define void @both_operands_need_extraction.2elts(<2 x ptr> %baseptrs, <2 x i64> %indices) {
  13 … [[PTRS:%.*]] = getelementptr inbounds i64, <2 x ptr> [[BASEPTRS:%.*]], <2 x i64> [[INDICES:%.*]]
  20 %ptrs = getelementptr inbounds i64, <2 x ptr> %baseptrs, <2 x i64> %indices
  31 define void @both_operands_need_extraction.3elts(<3 x ptr> %baseptrs, <3 x i64> %indices) {
  33 … [[PTRS:%.*]] = getelementptr inbounds i64, <3 x ptr> [[BASEPTRS:%.*]], <3 x i64> [[INDICES:%.*]]
  42 %ptrs = getelementptr inbounds i64, <3 x ptr> %baseptrs, <3 x i64> %indices
  56 define void @both_operands_need_extraction.4elts(<4 x ptr> %baseptrs, <4 x i64> %indices) {
  58 … [[PTRS:%.*]] = getelementptr inbounds i64, <4 x ptr> [[BASEPTRS:%.*]], <4 x i64> [[INDICES:%.*]]
  69 %ptrs = getelementptr inbounds i64, <4 x ptr> %baseptrs, <4 x i64> %indices
  88 define void @indices_need_extraction.2elts(ptr %baseptr, <2 x i64> %indices) {
  [all …]
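The pattern under test is easier to read as scalar code. A minimal C++ model of the 2-element vector GEP above (names are mine, not the test's): each result lane is baseptrs[lane] advanced by indices[lane] i64 elements.

    #include <array>
    #include <cstdint>

    // Per-lane model of the <2 x ptr> GEP: lane i of the result is
    // baseptrs[i] + indices[i], in units of the i64 element type.
    std::array<std::int64_t *, 2> vectorGep2(std::array<std::int64_t *, 2> baseptrs,
                                             std::array<std::int64_t, 2> indices) {
      std::array<std::int64_t *, 2> ptrs;
      for (int lane = 0; lane < 2; ++lane)
        ptrs[lane] = baseptrs[lane] + indices[lane];
      return ptrs;
    }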
|
/llvm-project/llvm/test/CodeGen/X86/ |
var-permute-256.ll
  14 define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
  66 %index0 = extractelement <4 x i64> %indices, i32 0
  67 %index1 = extractelement <4 x i64> %indices, i32 1
  68 %index2 = extractelement <4 x i64> %indices, i32 2
  69 %index3 = extractelement <4 x i64> %indices, i32 3
  81 define <4 x i64> @var_shuffle_zero_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
  168 %cmp = icmp ugt <4 x i64> %indices, <i64 3, i64 3, i64 3, i64 3>
  169 %or = select <4 x i1> %cmp, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> %indices
  186 define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
  211 %index0 = extractelement <8 x i32> %indices, i3 [all...]
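For reference, a scalar C++ model of what @var_shuffle_v4i64 computes. The `& 3` is an assumption mirroring how the x86 variable-permute instructions consume only the low bits of each index; the _zero_ variants above instead clamp out-of-range indices explicitly with the compare-and-select shown on lines 168-169.

    #include <array>
    #include <cstdint>

    // Each output lane selects one input lane by index.
    std::array<std::int64_t, 4> varShuffleV4i64(std::array<std::int64_t, 4> v,
                                                std::array<std::int64_t, 4> indices) {
      std::array<std::int64_t, 4> result;
      for (int i = 0; i < 4; ++i)
        result[i] = v[indices[i] & 3]; // low bits only, as hardware does
      return result;
    }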
var-permute-512.ll
  6 define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
  11 %index0 = extractelement <8 x i64> %indices, i32 0
  12 %index1 = extractelement <8 x i64> %indices, i32 1
  13 %index2 = extractelement <8 x i64> %indices, i32 2
  14 %index3 = extractelement <8 x i64> %indices, i32 3
  15 %index4 = extractelement <8 x i64> %indices, i32 4
  16 %index5 = extractelement <8 x i64> %indices, i32 5
  17 %index6 = extractelement <8 x i64> %indices, i32 6
  18 %index7 = extractelement <8 x i64> %indices, i32 7
  38 define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind {
  [all …]
|
/llvm-project/mlir/test/Transforms/ |
invalid-parallel-loop-collapsing.mlir
  3 // CL0: No collapsed-indices were specified. This pass is only for testing and does not automatical…
  5 …ne='builtin.module(func.func(test-scf-parallel-loop-collapsing{collapsed-indices-1=1}))' 2>&1 | Fi…
  6 // CL1: collapsed-indices-1 specified but not collapsed-indices-0
  8 …module(func.func(test-scf-parallel-loop-collapsing{collapsed-indices-0=1 collapsed-indices-2=2}))…
  9 // CL2: collapsed-indices-2 specified but not collapsed-indices-1
  11 …module(func.func(test-scf-parallel-loop-collapsing{collapsed-indices-0=1 collapsed-indices-1=2}))…
  12 // NON-ZERO: collapsed-indices arguments must include all values [0,N).
  14 …module(func.func(test-scf-parallel-loop-collapsing{collapsed-indices-0=0 collapsed-indices-1=2}))…
  15 // NON-CONTIGUOUS: collapsed-indices arguments must include all values [0,N).
  19 …ne='builtin.module(func.func(test-scf-parallel-loop-collapsing{collapsed-indices-0=0,1}))' -verify…
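The diagnostics above all encode one rule: taken together, the collapsed-indices options must name every value in [0,N) exactly once. A standalone C++ sketch of that check (not the pass's actual code):

    #include <algorithm>
    #include <vector>

    // True when the flattened list of collapsed loop indices covers exactly
    // the values [0, N) with no gaps and no duplicates.
    bool coversAllValues(std::vector<unsigned> indices) {
      std::sort(indices.begin(), indices.end());
      for (unsigned i = 0; i < indices.size(); ++i)
        if (indices[i] != i)
          return false; // first missing or duplicated value detected here
      return true;
    }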
|
/llvm-project/llvm/test/Transforms/CodeGenPrepare/AArch64/ |
sink-gather-scatter-addressing.ll
  7 define <vscale x 4 x float> @gather_offsets_sink_gep(ptr %base, <vscale x 4 x i32> %indices, <vscal…
  9 ; CHECK-SAME: ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]], <vscale x 4 x i1> [[MASK:%.*]],…
  13 ; CHECK-NEXT: [[TMP0:%.*]] = getelementptr float, ptr [[BASE]], <vscale x 4 x i32> [[INDICES]]
  20 %ptrs = getelementptr float, ptr %base, <vscale x 4 x i32> %indices
  33 define <vscale x 4 x float> @gather_offsets_sink_sext(ptr %base, <vscale x 4 x i32> %indices, <vsca…
  35 ; CHECK-SAME: ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]], <vscale x 4 x i1> [[MASK:%.*]],…
  39 ; CHECK-NEXT: [[TMP0:%.*]] = sext <vscale x 4 x i32> [[INDICES]] to <vscale x 4 x i64>
  47 %indices.sext = sext <vscale x 4 x i32> %indices to <vscale x 4 x i64>
  51 %ptrs = getelementptr float, ptr %base, <vscale x 4 x i64> %indices.sext
  61 define <vscale x 4 x float> @gather_offsets_sink_sext_get(ptr %base, <vscale x 4 x i32> %indices, <…
  [all …]
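For context, a scalar C++ model of the masked gather whose address computation these tests sink (fixed width 4 instead of <vscale x 4>, purely illustrative): each active lane loads base[indices[i]], inactive lanes keep the passthru value.

    #include <array>
    #include <cstdint>

    std::array<float, 4> maskedGather(const float *base,
                                      std::array<std::int32_t, 4> indices,
                                      std::array<bool, 4> mask,
                                      std::array<float, 4> passthru) {
      std::array<float, 4> result;
      for (int i = 0; i < 4; ++i)
        result[i] = mask[i] ? base[indices[i]] : passthru[i];
      return result;
    }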
|
/llvm-project/llvm/test/CodeGen/AArch64/ |
sve-intrinsics-scatter-stores-32bit-scaled-offsets.ll
  11 …h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
  20 <vscale x 4 x i32> %indices)
  24 …h_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
  33 <vscale x 4 x i32> %indices)
  37 …h_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
  46 <vscale x 2 x i32> %indices)
  50 …h_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %indices) {
  59 <vscale x 2 x i32> %indices)
  64 …w_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %indices) {
  72 <vscale x 4 x i32> %indices)
  [all …]
|
/llvm-project/llvm/test/Transforms/InstCombine/ |
gep-vector-indices.ll
  27 define ptr @vector_indices_v2i64_ext0(ptr %a, <2 x i64> %indices) {
  29 ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x i64> [[INDICES:%.*]], i64 0
  33 %gep = getelementptr i32, ptr %a, <2 x i64> %indices
  38 define ptr @vector_indices_nxv1i64_ext0(ptr %a, <vscale x 1 x i64> %indices) {
  40 ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <vscale x 1 x i64> [[INDICES:%.*]], i64 0
  44 %gep = getelementptr i32, ptr %a, <vscale x 1 x i64> %indices
  100 define ptr @vector_indices_nxv2i64_ext3(ptr %a, <vscale x 2 x i64> %indices) {
  102 ; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[A:%.*]], <vscale x 2 x i64> [[INDICES:%.*]]
  106 %gep = getelementptr i32, ptr %a, <vscale x 2 x i64> %indices
  111 define ptr @vector_indices_nxv2i64_extN(ptr %a, <vscale x 2 x i64> %indices, i3 [all...]
/llvm-project/mlir/include/mlir/Dialect/LLVMIR/ |
LLVMDialect.h
  75 /// a constant index as is required for indices into struct types.
  109 /// Class used for convenient access and iteration over GEP indices.
  114 /// GEP indices may either be constant indices or dynamic indices. The
  136 /// Constructs a GEPIndicesAdaptor with the raw constant indices of a GEPOp
  137 /// and the range that is indexed into for retrieving dynamic indices.
  143 /// all indices, use the iterators. in isDynamicIndex()
  156 /// Returns the amount of indices of the GEPOp.
  159 /// Returns true if this GEPOp does not have any indices in iterator()
  221 SmallVector<IntT> indices; convertArrayToIndices() local
  [all...]
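A toy C++ analogue of the adaptor these comments describe (not MLIR's actual class, names are illustrative): a GEP index is either a compile-time constant or a dynamic SSA value, and struct types require the constant form.

    #include <cstddef>
    #include <cstdint>
    #include <variant>
    #include <vector>

    struct Value {}; // stand-in for an SSA value
    using GEPIndex = std::variant<std::int32_t, Value>;

    // Dynamic indices are the ones carried as SSA values rather than
    // baked-in constants.
    bool isDynamicIndex(const std::vector<GEPIndex> &indices, std::size_t i) {
      return std::holds_alternative<Value>(indices[i]);
    }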
/llvm-project/mlir/include/mlir/Dialect/Bufferization/Transforms/ |
FuncBufferizableOpInterfaceImpl.h
  44 // indices instead of BlockArguments/OpOperand pointers.
  46 /// A set of block argument indices.
  49 /// A mapping of indices to indices.
  52 /// A mapping of indices to a list of indices.
  55 /// A mapping of ReturnOp OpOperand indices to equivalent FuncOp BBArg
  56 /// indices.
  59 /// A mapping of FuncOp BBArg indices to aliasing ReturnOp OpOperand indices
  [all...]
/llvm-project/mlir/include/mlir/ExecutionEngine/ |
CRunnerUtils.h
  140 T &operator[](Range &&indices) {
  141 assert(indices.size() == N &&
  142 "indices should match rank in memref subscript");
  145 int64_t currentIndex = *(indices.begin() + dim);
  178 T &operator[](Range indices) {
  179 assert(indices.size() == 1 &&
  180 "indices should match rank in memref subscript");
  181 return (*this)[*indices.begin()];
  199 T &operator[](Range indices) {
  200 assert((indices.size() == 0) &&
  [all …]
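A minimal sketch of the subscript pattern matched above, assuming the usual strided-memref layout (this is not the actual CRunnerUtils type): a rank-N subscript checks that the index tuple matches the rank, then maps it to an element via offset = sum(indices[d] * strides[d]).

    #include <array>
    #include <cassert>
    #include <cstdint>

    template <typename T, int N> struct StridedMemRefSketch {
      T *data;
      std::array<std::int64_t, N> sizes;
      std::array<std::int64_t, N> strides;

      T &operator[](const std::array<std::int64_t, N> &indices) {
        std::int64_t offset = 0;
        for (int dim = 0; dim < N; ++dim) {
          assert(indices[dim] >= 0 && indices[dim] < sizes[dim] &&
                 "index out of bounds");
          offset += indices[dim] * strides[dim];
        }
        return data[offset];
      }
    };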
|
/llvm-project/libcxx/test/std/containers/views/mdspan/layout_right/ |
index_operator.pass.cpp
  15 // template<class... Indices>
  16 // constexpr index_type operator()(Indices...) const noexcept;
  19 // * sizeof...(Indices) == extents_type::rank() is true,
  20 // * (is_convertible_v<Indices, index_type> && ...) is true, and
  21 // * (is_nothrow_constructible_v<index_type, Indices> && ...) is true.
  37 template<class Mapping, class ... Indices>
  38 concept operator_constraints = requires(Mapping m, Indices ... idxs) {
  42 template<class Mapping, class ... Indices>
  44 operator_constraints<Mapping, Indices...> in check_operator_constraints()
  46 constexpr bool check_operator_constraints(Mapping m, Indices in check_operator_constraints()
  49 check_operator_constraints(Mapping,Indices...) check_operator_constraints() argument
  [all...]
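The three preconditions quoted above can be restated as a standalone C++20 concept; `Rank` here stands in for `extents_type::rank()`, which the real test pulls from the mapping.

    #include <cstddef>
    #include <type_traits>

    template <class IndexType, std::size_t Rank, class... Indices>
    concept valid_index_pack =
        sizeof...(Indices) == Rank &&
        (std::is_convertible_v<Indices, IndexType> && ...) &&
        (std::is_nothrow_constructible_v<IndexType, Indices> && ...);

    static_assert(valid_index_pack<int, 2, int, short>);
    static_assert(!valid_index_pack<int, 2, int>); // wrong arity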
/llvm-project/libcxx/test/std/containers/views/mdspan/layout_left/ |
index_operator.pass.cpp
  15 // template<class... Indices>
  16 // constexpr index_type operator()(Indices...) const noexcept;
  19 // * sizeof...(Indices) == extents_type::rank() is true,
  20 // * (is_convertible_v<Indices, index_type> && ...) is true, and
  21 // * (is_nothrow_constructible_v<index_type, Indices> && ...) is true.
  37 template<class Mapping, class ... Indices>
  38 concept operator_constraints = requires(Mapping m, Indices ... idxs) {
  42 template<class Mapping, class ... Indices>
  44 operator_constraints<Mapping, Indices...> in check_operator_constraints()
  46 constexpr bool check_operator_constraints(Mapping m, Indices in check_operator_constraints()
  49 check_operator_constraints(Mapping,Indices...) check_operator_constraints() argument
  [all...]
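What separates this layout_left test from the layout_right one above is only the index mapping itself. An illustrative rank-2 comparison (extents {R, C}): layout_right keeps the rightmost index contiguous, layout_left the leftmost.

    #include <cstddef>

    constexpr std::size_t layoutRightOffset(std::size_t i, std::size_t j,
                                            std::size_t C) {
      return i * C + j; // row-major: j varies fastest
    }
    constexpr std::size_t layoutLeftOffset(std::size_t i, std::size_t j,
                                           std::size_t R) {
      return i + j * R; // column-major: i varies fastest
    }
    static_assert(layoutRightOffset(1, 2, 4) == 6);
    static_assert(layoutLeftOffset(1, 2, 3) == 7);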
/llvm-project/mlir/lib/Dialect/MemRef/Transforms/ |
FoldMemRefAliasOps.cpp
  49 /// Given the 'indices' of a load/store operation where the memref is a result
  50 /// of a expand_shape op, returns the indices w.r.t to the source memref of the
  65 ValueRange indices, in resolveSourceIndicesExpandShape() argument
  93 // Traverse all reassociation groups to determine the appropriate indices in resolveSourceIndicesExpandShape()
  96 assert(!groups.empty() && "association indices groups cannot be empty"); in resolveSourceIndicesExpandShape()
  127 dynamicIndices[i] = indices[groups[i]]; in resolveSourceIndicesExpandShape()
  129 // Supply suffix product results followed by load op indices as operands in resolveSourceIndicesExpandShape()
  152 /// Given the 'indices' of a load/store operation where the memref is a result
  153 /// of a collapse_shape op, returns the indices w.r.t to the source memref of
  168 ValueRange indices, in resolveSourceIndicesCollapseShape() argument
  377 calculateExpandedAccessIndices(AffineMap affineMap,const SmallVector<Value> & indices,Location loc,PatternRewriter & rewriter) calculateExpandedAccessIndices() argument
  439 SmallVector<Value> indices(loadOp.getIndices().begin(), matchAndRewrite() local
  505 SmallVector<Value> indices(loadOp.getIndices().begin(), matchAndRewrite() local
  553 SmallVector<Value> indices(loadOp.getIndices().begin(), matchAndRewrite() local
  606 SmallVector<Value> indices(storeOp.getIndices().begin(), matchAndRewrite() local
  668 SmallVector<Value> indices(storeOp.getIndices().begin(), matchAndRewrite() local
  717 SmallVector<Value> indices(storeOp.getIndices().begin(), matchAndRewrite() local
  [all...]
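A static-shape sketch of the expand_shape folding described above: the load indices of one reassociation group are linearized into a single source index using a suffix product of the group's sizes (a hypothetical helper, not the pass's code, which builds affine expressions over SSA values).

    #include <cstdint>
    #include <vector>

    std::int64_t
    linearizeGroupIndices(const std::vector<std::int64_t> &indices,
                          const std::vector<std::int64_t> &groupSizes) {
      std::int64_t linear = 0;
      std::int64_t suffixProduct = 1;
      // Walk from innermost to outermost dimension of the group.
      for (std::int64_t i = indices.size() - 1; i >= 0; --i) {
        linear += indices[i] * suffixProduct;
        suffixProduct *= groupSizes[i];
      }
      return linear;
    }
    // e.g. group sizes {4, 8} and indices {1, 2} give 1 * 8 + 2 = 10.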
ExtractAddressComputations.cpp
  43 ArrayRef<Value> indices) { in rebuildLoadOp() argument
  45 return rewriter.create<memref::LoadOp>(loc, srcMemRef, indices, in rebuildLoadOp()
  73 ArrayRef<Value> indices) { in rebuildStoreOp() argument
  76 srcMemRef, indices, in rebuildStoreOp()
  105 ArrayRef<Value> indices) { in rebuildLdMatrixOp() argument
  108 loc, ldMatrixOp.getResult().getType(), srcMemRef, indices, in rebuildLdMatrixOp()
  133 ArrayRef<Value> indices) { in rebuildTransferReadOp() argument
  136 loc, transferReadOp.getResult().getType(), srcMemRef, indices, in rebuildTransferReadOp()
  151 ArrayRef<Value> indices) { in rebuildTransferWriteOp() argument
  154 loc, transferWriteOp.getValue(), srcMemRef, indices, in rebuildTransferWriteOp()
  [all …]
|
/llvm-project/mlir/lib/Dialect/Utils/ |
ReshapeOpsUtils.cpp
  116 [](size_t all, ReassociationIndicesRef indices) { in composeReassociationIndices() argument
  117 return all + indices.size(); in composeReassociationIndices()
  136 for (const auto &indices : reassociationIndices) { in convertReassociationIndicesToExprs() local
  138 reassociationMap.reserve(indices.size()); in convertReassociationIndicesToExprs()
  139 for (int64_t index : indices) in convertReassociationIndicesToExprs()
  164 reassociation, [&](const ReassociationIndices &indices) -> Attribute { in getReassociationIndicesAttribute() argument
  165 return cast<Attribute>(b.getI64ArrayAttr(indices)); in getReassociationIndicesAttribute()
  174 ReassociationIndices indices; in convertReassociationMapsToIndices() local
  175 indices.reserve(exprs.size()); in convertReassociationMapsToIndices()
  177 indices in convertReassociationMapsToIndices()
  353 getUniqueNonUnitDim(ArrayRef<int64_t> indices,ArrayRef<int64_t> shape) getUniqueNonUnitDim() argument
  376 for (const auto &indices : reassociationIndices) getCollapseShapeTrivialSegments() local
  [all...]
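A toy version of the reassociation-index composition named above, under assumed semantics: each consumer group names producer groups, and composing concatenates the producer indices those groups contain.

    #include <cstdint>
    #include <vector>

    using ReassociationIndices = std::vector<std::int64_t>;

    std::vector<ReassociationIndices>
    composeReassociation(const std::vector<ReassociationIndices> &producer,
                         const std::vector<ReassociationIndices> &consumer) {
      std::vector<ReassociationIndices> result;
      for (const ReassociationIndices &group : consumer) {
        ReassociationIndices composed;
        // Concatenate every producer group this consumer group refers to.
        for (std::int64_t producerGroup : group)
          composed.insert(composed.end(), producer[producerGroup].begin(),
                          producer[producerGroup].end());
        result.push_back(composed);
      }
      return result;
    }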
/llvm-project/libcxx/test/std/containers/views/mdspan/layout_stride/ |
index_operator.pass.cpp
  15 // template<class... Indices>
  16 // constexpr index_type operator()(Indices...) const noexcept;
  19 // * sizeof...(Indices) == extents_type::rank() is true,
  20 // * (is_convertible_v<Indices, index_type> && ...) is true, and
  21 // * (is_nothrow_constructible_v<index_type, Indices> && ...) is true.
  37 template <class Mapping, class... Indices>
  38 concept operator_constraints = requires(Mapping m, Indices... idxs) {
  42 template <class Mapping, class... Indices> in check_operator_constraints()
  43 requires(operator_constraints<Mapping, Indices...>) in check_operator_constraints()
  44 constexpr bool check_operator_constraints(Mapping m, Indices in check_operator_constraints()
  47 check_operator_constraints(Mapping,Indices...) check_operator_constraints() argument
  [all...]
/llvm-project/libc/src/stdfix/ |
exphk.cpp
  75 // To get the indices, we shift the values so that it start with 0.
  76 // Range of indices: 0 <= indices <= 89 variable
  77 StorageType indices = cpp::bit_cast<StorageType>((x_rounded + 0x1.6p2hk) >>
  80 // indices = (hi + mid + 44/8) * 8
  82 // hi + mid = indices/8 - 5.5
  84 // exp( floor(indices / 8) - 5 )
  86 // exp( (indices - floor(indices)) - 0.5 )
  87 short accum exp_hi = EXP_HI[indices >> [all...]
expk.cpp
  85 // To get the indices, we shift the values so that it start with 0.
  86 // Range of indices: 0 <= indices <= 355. variable
  87 StorageType indices = cpp::bit_cast<StorageType>((x_rounded + 0x1.62p3k) >>
  90 // indices = (hi + mid + 177/16) * 16
  92 // hi + mid = indices/16 - 11.0625
  94 // exp( floor(indices / 16) - 11 )
  96 // exp( (indices - floor(indices)) - 0.0625 )
  97 accum exp_hi = EXP_HI[indices >> [all...]
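The comments in exphk.cpp and expk.cpp derive the table index from the rounded fixed-point input. A worked C++ check of that arithmetic, using only the constants quoted above (0x1.6p2 = 44/8 = 5.5 and 0x1.62p3 = 177/16 = 11.0625; the tables themselves are elided):

    #include <cassert>

    // After rounding, x = hi + mid is a multiple of 1/8 (exphk) or 1/16
    // (expk), so shifting by the table's bias and scaling gives an integer.
    int exphkTableIndex(double xRounded) {
      int indices = static_cast<int>((xRounded + 5.5) * 8.0);
      assert(indices >= 0 && indices <= 89);
      return indices; // indices >> 3 selects EXP_HI; low 3 bits, EXP_MID
    }

    int expkTableIndex(double xRounded) {
      int indices = static_cast<int>((xRounded + 11.0625) * 16.0);
      assert(indices >= 0 && indices <= 355);
      return indices; // indices >> 4 selects EXP_HI; low 4 bits, EXP_MID
    }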