
Searched full:offsets (Results 1 – 25 of 1675) sorted by relevance


/llvm-project/llvm/test/CodeGen/AArch64/
sve-masked-gather-32b-unsigned-unscaled.ll
6 ; unscaled unpacked 32-bit offsets
9 define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2…
14 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
15 %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
21 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
26 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
27 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
34 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
39 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
40 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
[all …]
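Taken together, these fragments show the pattern the file exercises: zero-extend the 32-bit offsets to 64 bits, build per-lane addresses with a vector getelementptr, and load through the generic masked-gather intrinsic. A minimal self-contained sketch of that pattern; the gather call, its declaration, and the function name are reconstructed here, not quoted from the file:

; Gather i8 values at base + zext(offsets) and widen the result to i64.
; With zero-extended 32-bit offsets this can lower to an SVE
; LD1B { z0.d }, p0/z, [x0, z0.d, uxtw] form.
define <vscale x 2 x i64> @sketch_gather_u32_unscaled(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
  %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets.zext
  %vals = call <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %vals.zext = zext <vscale x 2 x i8> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.zext
}
declare <vscale x 2 x i8> @llvm.masked.gather.nxv2i8.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)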
sve-masked-gather-32b-unsigned-scaled.ll
6 ; scaled unpacked 32-bit offsets
9 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
14 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
15 %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
21 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
26 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
27 %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets.zext
33 define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
38 %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
39 %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets.zext
[all …]
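The scaled variant differs only in the getelementptr element type: offsets count i16/i32/i64 elements rather than bytes, so the element size supplies the scale. A hedged sketch of the i16 case, with the gather call and function name again assumed rather than quoted:

; Offsets are i16 element indices, so each lane's address is
; base + 2 * zext(offset), matching SVE's LD1H [x0, z0.d, uxtw #1] form.
define <vscale x 2 x i64> @sketch_gather_u32_scaled(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2 x i1> %mask) {
  %offsets.zext = zext <vscale x 2 x i32> %offsets to <vscale x 2 x i64>
  %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets.zext
  %vals = call <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  %vals.zext = zext <vscale x 2 x i16> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.zext
}
declare <vscale x 2 x i16> @llvm.masked.gather.nxv2i16.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)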
sve-masked-scatter-32b-unscaled.ll
5 ; unscaled unpacked 32-bit offsets
13 %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
14 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
25 %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
26 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
37 %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
38 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
49 %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
50 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
61 %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
[all …]
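The scatter tests mirror the gathers with two changes: the offsets are sign-extended, and the store goes through the masked-scatter intrinsic. A minimal sketch, under the same caveat that the scatter call and declaration are reconstructed, not quoted:

; Scatter one byte per active lane to base + sext(offsets).
define void @sketch_scatter_s32_unscaled(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i32> %i32offsets, <vscale x 2 x i1> %mask) {
  %offsets = sext <vscale x 2 x i32> %i32offsets to <vscale x 2 x i64>
  %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
  call void @llvm.masked.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8> %data, <vscale x 2 x ptr> %byte_ptrs, i32 1, <vscale x 2 x i1> %mask)
  ret void
}
declare void @llvm.masked.scatter.nxv2i8.nxv2p0(<vscale x 2 x i8>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)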
sve-intrinsics-scatter-stores-32bit-unscaled-offsets.ll
11 …b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
20 <vscale x 4 x i32> %offsets)
24 …b_s_sxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
33 <vscale x 4 x i32> %offsets)
37 …b_d_uxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
46 <vscale x 2 x i32> %offsets)
50 …b_d_sxtw(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i32> %offsets) {
59 <vscale x 2 x i32> %offsets)
64 …h_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
73 <vscale x 4 x i32> %offsets)
[all …]
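Unlike the files above, this test drives the AArch64 scatter-store intrinsics directly; the uxtw/sxtw suffix in each function name selects how the 32-bit offsets are extended. A hedged reconstruction of one st1b case, where the truncation step and the exact intrinsic signature are inferred from the visible arguments rather than quoted:

; st1b with unsigned-extended 32-bit offsets: each active lane stores
; one byte at base + uxtw(offset).
define void @sketch_sst1b_s_uxtw(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets) {
  %data.trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> %data.trunc, <vscale x 4 x i1> %pg, ptr %base, <vscale x 4 x i32> %offsets)
  ret void
}
declare void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, ptr, <vscale x 4 x i32>)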
sve-masked-gather-32b-signed-unscaled.ll
6 ; unscaled unpacked 32-bit offsets
9 define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i32> %offsets, <vscale x 2…
14 %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
20 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
25 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
32 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
37 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
44 define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
49 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i32> %offsets
55 define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x…
[all …]
sve-masked-gather-32b-signed-scaled.ll
6 ; scaled unpacked 32-bit offsets
9 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
14 %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i32> %offsets
20 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
25 %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i32> %offsets
31 define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i32> %offsets, <vscale x …
36 %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i32> %offsets
41 define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i32> %offsets, <vscale x…
46 %ptrs = getelementptr half, ptr %base, <vscale x 2 x i32> %offsets
51 define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i32> %offsets, <vscal…
[all …]
sve-masked-gather-64b-unscaled.ll
5 define <vscale x 2 x i64> @masked_gather_nxv2i8(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2…
10 %ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
16 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x …
21 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
28 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x …
33 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
40 define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i64> %offsets, <vscale x …
45 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
51 define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x…
56 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
[all …]
sve-masked-gather-64b-scaled.ll
5 define <vscale x 2 x i64> @masked_gather_nxv2i16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x …
10 %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
16 define <vscale x 2 x i64> @masked_gather_nxv2i32(ptr %base, <vscale x 2 x i64> %offsets, <vscale x …
21 %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
27 define <vscale x 2 x i64> @masked_gather_nxv2i64(ptr %base, <vscale x 2 x i64> %offsets, <vscale x …
32 %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets
37 define <vscale x 2 x half> @masked_gather_nxv2f16(ptr %base, <vscale x 2 x i64> %offsets, <vscale x…
42 %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets
47 define <vscale x 2 x bfloat> @masked_gather_nxv2bf16(ptr %base, <vscale x 2 x i64> %offsets, <vscal…
52 %ptrs = getelementptr bfloat, ptr %base, <vscale x 2 x i64> %offsets
[all …]
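With native 64-bit offsets there is no extension step, so the scaled form reduces to a typed getelementptr plus the gather call. A sketch of the i32 case, reconstructed under the same assumptions as above:

; 64-bit element indices: gep i32 scales each offset by 4, matching
; SVE's LD1W [x0, z0.d, lsl #2] form.
define <vscale x 2 x i64> @sketch_gather_64b_scaled(ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1> %mask) {
  %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
  %vals = call <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr> %ptrs, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  %vals.zext = zext <vscale x 2 x i32> %vals to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %vals.zext
}
declare <vscale x 2 x i32> @llvm.masked.gather.nxv2i32.nxv2p0(<vscale x 2 x ptr>, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)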
sve-masked-scatter-64b-unscaled.ll
5 ; unscaled 64-bit offsets
8 …ed_64bit_offsets(<vscale x 2 x i8> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
13 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
19 …d_64bit_offsets(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
24 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
30 …d_64bit_offsets(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
35 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
41 …d_64bit_offsets(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
46 %byte_ptrs = getelementptr i8, ptr %base, <vscale x 2 x i64> %offsets
52 …_64bit_offsets(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
[all …]
sve-masked-scatter-64b-scaled.ll
5 ; scaled 64-bit offsets
8 …scatter_nxv2i16(<vscale x 2 x i16> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
13 %ptrs = getelementptr i16, ptr %base, <vscale x 2 x i64> %offsets
18 …scatter_nxv2i32(<vscale x 2 x i32> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
23 %ptrs = getelementptr i32, ptr %base, <vscale x 2 x i64> %offsets
28 …scatter_nxv2i64(<vscale x 2 x i64> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
33 %ptrs = getelementptr i64, ptr %base, <vscale x 2 x i64> %offsets
38 …catter_nxv2f16(<vscale x 2 x half> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
43 %ptrs = getelementptr half, ptr %base, <vscale x 2 x i64> %offsets
48 …atter_nxv2f32(<vscale x 2 x float> %data, ptr %base, <vscale x 2 x i64> %offsets, <vscale x 2 x i1…
[all …]
/llvm-project/mlir/test/Dialect/Vector/
vector-unroll-options.mlir
19 // CHECK-SAME: offsets = [0, 0]
21 // CHECK-SAME: offsets = [0, 0]
23 // CHECK-SAME: offsets = [0, 0]
28 // CHECK-SAME: offsets = [0, 2]
30 // CHECK-SAME: offsets = [0, 2]
35 // CHECK-SAME: offsets = [0, 0]
37 // CHECK-SAME: offsets = [4, 0]
39 // CHECK-SAME: offsets = [0, 4]
44 // CHECK-SAME: offsets = [0, 2]
46 // CHECK-SAME: offsets
[all …]
vector-transfer-unroll.mlir
8 // CHECK-NEXT: %[[VEC0:.*]] = vector.insert_strided_slice %[[VTR0]], %{{.*}} {offsets = [0, 0], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
10 // CHECK-NEXT: %[[VEC1:.*]] = vector.insert_strided_slice %[[VTR1]], %[[VEC0]] {offsets = [0, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
12 // CHECK-NEXT: %[[VEC2:.*]] = vector.insert_strided_slice %[[VTR2]], %[[VEC1]] {offsets = [2, 0], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
14 // CHECK-NEXT: %[[VEC3:.*]] = vector.insert_strided_slice %[[VTR3]], %[[VEC2]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
20 // ORDER-NEXT: %[[VEC0:.*]] = vector.insert_strided_slice %[[VTR0]], %{{.*}} {offsets = [0, 0], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
22 // ORDER-NEXT: %[[VEC1:.*]] = vector.insert_strided_slice %[[VTR1]], %[[VEC0]] {offsets = [2, 0], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
24 // ORDER-NEXT: %[[VEC2:.*]] = vector.insert_strided_slice %[[VTR2]], %[[VEC1]] {offsets = [0, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
26 // ORDER-NEXT: %[[VEC3:.*]] = vector.insert_strided_slice %[[VTR3]], %[[VEC2]] {offsets = [2, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
41 // CHECK: %[[S0:.*]] = vector.extract_strided_slice %{{.*}} {offsets = [0, 0], sizes = [2, 2], strides = [1, 1]} : vector<4x4xf32> to vector<2x2xf32>
43 // CHECK-NEXT: %[[S1:.*]] = vector.extract_strided_slice %{{.*}} {offsets
[all …]
vector-break-down-bitcast.mlir
11 // CHECK: %[[EXTRACT0:.+]] = vector.extract_strided_slice %[[INPUT]] {offsets = [0], sizes = [4], strides = [1]} : vector<8xf16> to vector<4xf16>
13 // CHECK: %[[INSERT0:.+]] = vector.insert_strided_slice %[[CAST0]], %[[INIT]] {offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
14 // CHECK: %[[EXTRACT1:.+]] = vector.extract_strided_slice %[[INPUT]] {offsets = [4], sizes = [4], strides = [1]} : vector<8xf16> to vector<4xf16>
16 // CHECK: %[[INSERT1:.+]] = vector.insert_strided_slice %[[CAST1]], %[[INSERT0]] {offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>
29 // CHECK: %[[EXTRACT0:.+]] = vector.extract_strided_slice %[[INPUT]] {offsets = [0], sizes = [4], strides = [1]} : vector<16xi8> to vector<4xi8>
31 // CHECK: %[[INSERT0:.+]] = vector.insert_strided_slice %[[CAST0]], %[[INIT]] {offsets = [0], strides = [1]} : vector<1xi32> into vector<4xi32>
32 // CHECK: %[[EXTRACT1:.+]] = vector.extract_strided_slice %[[INPUT]] {offsets = [4], sizes = [4], strides = [1]} : vector<16xi8> to vector<4xi8>
34 // CHECK: %[[INSERT1:.+]] = vector.insert_strided_slice %[[CAST1]], %[[INSERT0]] {offsets = [1], strides = [1]} : vector<1xi32> into vector<4xi32>
35 // CHECK: %[[EXTRACT2:.+]] = vector.extract_strided_slice %[[INPUT]] {offsets = [8], sizes = [4], strides = [1]} : vector<16xi8> to vector<4xi8>
37 // CHECK: %[[INSERT2:.+]] = vector.insert_strided_slice %[[CAST2]], %[[INSERT1]] {offsets
[all …]
vector-scan-transforms.mlir
7 // CHECK: %[[B:.*]] = vector.extract_strided_slice %[[ARG0]] {offsets = [0], sizes = [1], stri…
8 // CHECK: %[[C:.*]] = vector.insert_strided_slice %[[B]], %[[A]] {offsets = [0], strides = [1]…
9 // CHECK: %[[D:.*]] = vector.extract_strided_slice %[[ARG0]] {offsets = [1], sizes = [1], stri…
11 // CHECK: %[[F:.*]] = vector.insert_strided_slice %[[E]], %[[C]] {offsets = [1], strides = [1]…
25 // CHECK: %[[B:.*]] = vector.extract_strided_slice %[[ARG0]] {offsets = [0], sizes = [1], stri…
27 // CHECK: %[[D:.*]] = vector.insert_strided_slice %[[C]], %[[A]] {offsets = [0], strides = [1]…
29 // CHECK: %[[F:.*]] = vector.insert_strided_slice %[[E]], %[[D]] {offsets = [1], strides = [1]…
43 // CHECK: %[[B:.*]] = vector.extract_strided_slice %[[ARG0]] {offsets = [0, 0], sizes = [1, 3]…
44 // CHECK: %[[C:.*]] = vector.insert_strided_slice %[[B]], %[[A]] {offsets = [0, 0], strides = …
45 // CHECK: %[[D:.*]] = vector.extract_strided_slice %[[ARG0]] {offsets = [1, 0], sizes = [1, 3]…
[all …]
/llvm-project/mlir/python/mlir/dialects/
memref.py
30 source_memref_type, offsets, static_sizes, static_strides
34 offsets, static_sizes, static_strides, source_strides = map(
35 list, (offsets, static_sizes, static_strides, source_strides)
50 for s in [offsets, static_sizes, static_strides]:
55 if any(not _is_static_int_like(i) for i in offsets + [source_offset]):
59 for offset, target_stride in zip(offsets, source_strides):
73 offsets,
90 offsets: MixedValues,
98 if offsets is None:
99 offsets = []
[all …]
/llvm-project/llvm/lib/Target/ARM/
MVEGatherScatterLowering.cpp
85 // Decompose a ptr into Base and Offsets, potentially using a GEP to return a
86 // scalar base and vector offsets, or else fallback to using a base of 0 and
88 Value *decomposePtr(Value *Ptr, Value *&Offsets, int &Scale,
91 // Check for a getelementptr and deduce base and offsets from it, on success
92 // returning the base directly and the offsets indirectly using the Offsets
94 Value *decomposeGEP(Value *&Offsets, FixedVectorType *Ty,
107 // Create a gather from a base + vector of offsets
121 // Create a scatter to a base + vector of offsets
122 Instruction *tryCreateMaskedScatterOffset(IntrinsicInst *I, Value *Offsets,
185 checkOffsetSize(Value *Offsets, unsigned TargetElemCount)
227 decomposePtr(Value *Ptr, Value *&Offsets, int &Scale, FixedVectorType *Ty, Type *MemoryTy, IRBuilder<> &Builder)
254 decomposeGEP(Value *&Offsets, FixedVectorType *Ty, GetElementPtrInst *GEP, IRBuilder<> &Builder)
548 Value *Offsets; in tryCreateMaskedGatherOffset()
701 Value *Offsets; in tryCreateMaskedScatterOffset()
745 Value *Offsets; in tryCreateIncrementingGatScat()
803 tryCreateIncrementingWBGatScat(IntrinsicInst *I, Value *BasePtr, Value *Offsets, unsigned TypeScale, IRBuilder<> &Builder)
972 optimiseOffsets(Value *Offsets, BasicBlock *BB, LoopInfo *LI)
1189 foldGEP(GetElementPtrInst *GEP, Value *&Offsets, unsigned &Scale, IRBuilder<> &Builder)
1224 Value *Offsets; in optimiseAddress()
[all …]
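Per the comments at lines 85-94, this pass splits a vector of pointers into a scalar base plus a vector of offsets so that a generic masked gather or scatter can use MVE's vector-offset addressing modes. A hedged example of the kind of input IR decomposeGEP looks through (the function name is illustrative, not from the pass):

; The pass recovers %base and %offsets from this GEP, letting the gather
; lower to a single MVE VLDRW.U32 with a [r0, q1, uxtw #2] operand.
define <4 x i32> @sketch_mve_gather_input(ptr %base, <4 x i32> %offsets, <4 x i1> %mask) {
  %ptrs = getelementptr i32, ptr %base, <4 x i32> %offsets
  %vals = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %mask, <4 x i32> undef)
  ret <4 x i32> %vals
}
declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)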
/llvm-project/clang/test/CodeGen/aarch64-sve-intrinsics/
acle_sve_adrb.c
/llvm-project/llvm/test/DebugInfo/X86/
dwarfdump-str-offsets.s
2 # RUN: llvm-dwarfdump -v %t.o 2> %t.err | FileCheck --check-prefixes=COMMON,SPLIT,OFFSETS %s
7 # RUN: llvm-dwarfdump -debug-str-offsets %t.o | FileCheck --check-prefix=OFFSETS %s
410 # OFFSETS: .debug_str_offsets contents:
411 # OFFSETS-NEXT: 0x00000000: Contribution size = 32, Format = DWARF32, Version = 5
412 # OFFSETS-NEXT: 0x00000008: 00000000 "Handmade DWARF producer"
413 # OFFSETS-NEXT: 0x0000000c: 00000018 "Compile_Unit_1"
414 # OFFSETS-NEXT: 0x00000010: 00000027 "/home/test/CU1"
415 # OFFSETS-NEXT: 0x00000014: 00000067 "MyFunc"
416 # OFFSETS-NEXT: 0x00000018: 0000006e "MyVar1"
417 # OFFSETS-NEXT: 0x0000001c: 00000075 "MyVar2"
[all …]
/llvm-project/mlir/test/Dialect/ArmNeon/
lower-to-arm-neon.mlir
49 // CHECK-DAG: %[[VAL_4:.*]] = vector.extract_strided_slice %[[VAL_0]] {offsets = [0, 0], sizes = […
50 // CHECK-DAG: %[[VAL_5:.*]] = vector.extract_strided_slice %[[VAL_1]] {offsets = [0, 0], sizes = […
51 // CHECK-DAG: %[[VAL_6:.*]] = vector.extract_strided_slice %[[VAL_2]] {offsets = [0, 0], sizes = […
57 // CHECK-DAG: %[[VAL_12:.*]] = vector.insert_strided_slice %[[VAL_11]], %[[VAL_3]] {offsets = [0, …
58 // CHECK-DAG: %[[VAL_13:.*]] = vector.extract_strided_slice %[[VAL_0]] {offsets = [0, 0], sizes = …
59 // CHECK-DAG: %[[VAL_14:.*]] = vector.extract_strided_slice %[[VAL_1]] {offsets = [2, 0], sizes = …
60 // CHECK-DAG: %[[VAL_15:.*]] = vector.extract_strided_slice %[[VAL_2]] {offsets = [0, 2], sizes = …
66 // CHECK-DAG: %[[VAL_21:.*]] = vector.insert_strided_slice %[[VAL_20]], %[[VAL_12]] {offsets = [0,…
67 // CHECK-DAG: %[[VAL_22:.*]] = vector.extract_strided_slice %[[VAL_0]] {offsets = [2, 0], sizes = …
68 // CHECK-DAG: %[[VAL_23:.*]] = vector.extract_strided_slice %[[VAL_1]] {offsets = [0, 0], sizes = …
[all …]
/llvm-project/lldb/test/Shell/ScriptInterpreter/Python/Crashlog/
patch-crashlog.py
15 def __init__(self, data, binary, offsets, json):
18 self.offsets = offsets
34 if not self.offsets:
42 if symbol in self.offsets:
43 patch_addr = int(m.group(1), 16) + int(self.offsets[symbol])
61 parser.add_argument("--offsets", required=True)
66 offsets = json.loads(args.offsets)
71 p = CrashLogPatcher(data, args.binary, offsets, args.json)
/llvm-project/llvm/test/tools/yaml2obj/ELF/DWARF/
debug-str-offsets.yaml
26 ## ^------- offsets[0] (4-byte)
27 ## ^------- offsets[1] (4-byte)
33 ## ^---------------- offsets[0] (8-byte)
34 ## ^---------------- offsets[1] (8-byte)
44 - Offsets:
48 Offsets:
63 ## ^------- offsets[0] (4-byte)
64 ## ^------- offsets[1] (4-byte)
70 ## ^---------------- offsets[0] (8-byte)
71 ## ^---------------- offsets[1] (8-byte)
[all …]
debug-rnglists.yaml
28 ## ^------- offsets[0] (4-byte)
30 ## ^------- offsets[1] (4-byte)
51 ## ^------- offsets[0] (4-byte)
52 ## ^------- offsets[1] (4-byte)
115 ## ^------- offsets[0] (4-byte)
117 ## ^------- offsets[1] (4-byte)
138 ## ^------- offsets[0] (4-byte)
139 ## ^------- offsets[1] (4-byte)
171 ## ^---------------- offsets[0] (8-byte)
172 ## ^------- offsets[1] (8-byte)
[all …]
/llvm-project/llvm/test/tools/llvm-objdump/MachO/
archive-headers.test
3 …macho-universal-archive.x86_64.i386 --macho --archive-headers --arch all --archive-member-offsets \
4 RUN: | FileCheck %s -check-prefix=OFFSETS
16 OFFSETS: Archive : {{.*}}/macho-universal-archive.x86_64.i386 (architecture x86_64)
17 OFFSETS: 8 -rw-r--r--124/11 44 {{.*}} __.SYMDEF SORTED
18 OFFSETS: 112 -rw-r--r--124/0 860 {{.*}} hello.o
19 OFFSETS: Archive : {{.*}}/macho-universal-archive.x86_64.i386 (architecture i386)
20 OFFSETS: 8 -rw-r--r--124/11 60 {{.*}} __.SYMDEF SORTED
21 OFFSETS: 128 -rw-r--r--124/0 388 {{.*}} foo.o
/llvm-project/mlir/lib/Dialect/Tensor/IR/
TensorTilingInterfaceImpl.cpp
52 ArrayRef<OpFoldResult> offsets, in getTiledImplementation()
55 tensor::bubbleUpPadSlice(b, cast<PadOp>(op), offsets, sizes); in getTiledImplementation()
63 ArrayRef<OpFoldResult> offsets, in getResultTilePosition()
67 resultOffsets.assign(offsets.begin(), offsets.end()); in getResultTilePosition()
74 ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes, in getPackUnPackIterationDomain()
77 iterDomainOffsets.assign(offsets.begin(), offsets.end()); in getPackUnPackIterationDomain()
84 ArrayRef<OpFoldResult> offsets, in getPackUnPackIterationDomain()
86 return getTiledImplementation(op, b, offsets, size… in getPackUnPackIterationDomain()
93 applyPermToRange(SmallVector<OpFoldResult> &offsets, SmallVector<OpFoldResult> &sizes, ArrayRef<int64_t> permutation)
578 bubbleUpPadSlice(OpBuilder &b, tensor::PadOp padOp, ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes, bool generateZeroSliceGuard)
[all …]
/llvm-project/llvm/test/Transforms/LoopStrengthReduce/ARM/
2012-06-15-lsr-noaddrmode.ll
13 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
15 ; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i32
17 ; LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
19 ; LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
21 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
26 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
28 ; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i32
30 ; LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
32 ; LSR Use: Kind=Address of i32, Offsets={0}, widest fixup type: i32*
34 ; LSR Use: Kind=Special, Offsets={0}, all-fixups-outside-loop, widest fixup type: i32
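These "LSR Use" lines are Loop Strength Reduction debug output: each fixup is classified by kind (Address, ICmpZero, Special) along with the constant Offsets the rewritten formula must preserve. A hypothetical loop that would produce an "Address of i32" use with Offsets={0}, written with typed pointers to match this pre-opaque-pointer test:

; The store address is a plain {%p,+,4} recurrence with no constant
; displacement, so LSR records an Address use with a zero offset; the
; exit compare typically becomes an ICmpZero use.
define void @sketch_lsr_address_use(i32* %p, i32 %n) {
entry:
  br label %loop
loop:
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  %addr = getelementptr i32, i32* %p, i32 %iv
  store i32 0, i32* %addr
  %iv.next = add i32 %iv, 1
  %done = icmp eq i32 %iv.next, %n
  br i1 %done, label %exit, label %loop
exit:
  ret void
}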
