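; Cost-model checks for @llvm.get.active.lane.mask with scalable
; (<vscale x N x i1>) and fixed-width (<N x i1>) result types and
; i16/i32/i64 index types.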
; CHECK-LABEL: 'get_lane_mask'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %mask_nxv16i1_i64 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %mask_nxv8i1_i64 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv4i1_i64 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %mask_nxv2i1_i64 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %mask_nxv1i1_i64 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %mask_nxv16i1_i32 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %mask_nxv1i1_i32 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 64 for instruction: %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef)
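; Note the pattern in the checks above: the reported cost grows linearly with
; the lane count and roughly doubles each time the index type widens
; (e.g. v16i1 costs 4/8/16 for i16/i32/i64 indices).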
define void @get_lane_mask() {
  %mask_nxv16i1_i64 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 undef, i64 undef)
  %mask_nxv8i1_i64 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64 undef, i64 undef)
  %mask_nxv4i1_i64 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 undef, i64 undef)
  %mask_nxv2i1_i64 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 undef, i64 undef)
  %mask_nxv1i1_i64 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64 undef, i64 undef)

  %mask_nxv16i1_i32 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 undef, i32 undef)
  %mask_nxv8i1_i32 = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32 undef, i32 undef)
  %mask_nxv4i1_i32 = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32 undef, i32 undef)
  %mask_nxv2i1_i32 = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32 undef, i32 undef)
  %mask_nxv1i1_i32 = call <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32 undef, i32 undef)

  %mask_nxv32i1_i64 = call <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64 undef, i64 undef)
  %mask_nxv16i1_i16 = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16 undef, i16 undef)

  %mask_v16i1_i64 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 undef, i64 undef)
  %mask_v8i1_i64 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64 undef, i64 undef)
  %mask_v4i1_i64 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 undef, i64 undef)
  %mask_v2i1_i64 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 undef, i64 undef)

  %mask_v16i1_i32 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 undef, i32 undef)
  %mask_v8i1_i32 = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 undef, i32 undef)
  %mask_v4i1_i32 = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 undef, i32 undef)
  %mask_v2i1_i32 = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 undef, i32 undef)

  %mask_v32i1_i64 = call <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64 undef, i64 undef)
  %mask_v16i1_i16 = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16 undef, i16 undef)

  ret void
}

declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i64(i64, i64)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i32(i32, i32)
declare <vscale x 1 x i1> @llvm.get.active.lane.mask.nxv1i1.i32(i32, i32)
declare <vscale x 32 x i1> @llvm.get.active.lane.mask.nxv32i1.i64(i64, i64)
declare <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i16(i16, i16)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64, i64)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i64(i64, i64)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64, i64)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64, i64)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32, i32)
declare <32 x i1> @llvm.get.active.lane.mask.v32i1.i64(i64, i64)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i16(i16, i16)
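
; For reference: @llvm.get.active.lane.mask(%base, %n) produces a mask whose
; lane i is true iff %base + i < %n (unsigned compare, evaluated as if in a
; type wide enough not to overflow), which is why it is used to predicate
; tail-folded vector loops. A minimal usage sketch follows; the function name
; is hypothetical and not part of the checked function above.
define <4 x i1> @lane_mask_example(i32 %index, i32 %trip_count) {
  ; Lane i is active while %index + i is still below %trip_count.
  %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %trip_count)
  ret <4 x i1> %mask
}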