; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: vector base + immediate offset (index)
;   e.g. ld1h { z0.s }, p0/z, [z0.s, #16]
;
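; For this form the offset is encoded as a 5-bit unsigned index scaled by the
; memory element size, so the legal immediates are multiples of the element
; size in the following ranges (a summary of the encoding, not itself checked
; by this test):
;   bytes         #0-#31
;   halfwords     #0-#62
;   words         #0-#124
;   doublewords   #0-#248
; #16 is representable for every element size, so each case below should
; select the [Zn.T, #imm] form directly.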

; LD1B
define <vscale x 4 x i32> @gld1b_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1b_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1b_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1h_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1h_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1w_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_imm_offset_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_imm_offset_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: vector base + immediate offset (index)
;   e.g. ld1sh { z0.s }, p0/z, [z0.s, #16]
;

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sb_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sb_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_imm_offset(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sh_s_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [z0.s, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sh_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_imm_offset(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sw_d_imm_offset:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [z0.d, #16]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 16)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

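; Note that the unsigned and signed loads use the same intrinsic: it returns
; the narrow element type, and the extension applied to its result is what
; selects ld1b vs. ld1sb (and likewise for the wider elements). A minimal
; sketch, using the declarations at the end of this file (not a checked
; test):
;
;   %narrow = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 16)
;   %z = zext <vscale x 4 x i8> %narrow to <vscale x 4 x i32>   ; -> ld1b
;   %s = sext <vscale x 4 x i8> %narrow to <vscale x 4 x i32>   ; -> ld1sb
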
;
; LD1B, LD1W, LD1H, LD1D: vector base + out of range immediate offset
;   e.g. ld1b { z0.d }, p0/z, [x0, z0.d]
;
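; When the immediate cannot be encoded, the expected lowering is to
; materialise the offset in a scalar register and fall back to the
; register+vector form: [x8, z0.d] for 64-bit bases, and [x8, z0.s, uxtw]
; for 32-bit bases, whose elements are zero-extended for the address
; computation. The offsets below are unencodable because #32 exceeds the
; byte maximum of #31, while #63, #125 and #249 are both past the maximum
; and not multiples of the element size.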

; LD1B
define <vscale x 4 x i32> @gld1b_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1b_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    ld1b { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 32)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1b_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    ld1b { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 32)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1h_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 63)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1h_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 63)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 125)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1w_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 125)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_imm_offset_out_of_range_float(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1w_s_imm_offset_out_of_range_float:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 125)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #249
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 249)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_imm_offset_out_of_range_double(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1d_d_imm_offset_out_of_range_double:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #249
; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 249)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: vector base + out of range immediate offset
;   e.g. ld1sb { z0.s }, p0/z, [x8, z0.s, uxtw]
;

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sb_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    ld1sb { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 32)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sb_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #32
; CHECK-NEXT:    ld1sb { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 32)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_imm_offset_out_of_range(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
; CHECK-LABEL: gld1sh_s_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    ld1sh { z0.s }, p0/z, [x8, z0.s, uxtw]
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base, i64 63)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sh_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #63
; CHECK-NEXT:    ld1sh { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 63)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_imm_offset_out_of_range(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
; CHECK-LABEL: gld1sw_d_imm_offset_out_of_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #125
; CHECK-NEXT:    ld1sw { z0.d }, p0/z, [x8, z0.d]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base, i64 125)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
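
; The intrinsic is overloaded on both the result vector type and the base
; vector type, with the overload mangled into the name (e.g. .nxv4i8.nxv4i32
; for an <vscale x 4 x i8> result gathered from <vscale x 4 x i32> bases), so
; each instantiation used above needs its own declaration below.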

; LD1B/LD1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1H/LD1SH
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

; LD1W/LD1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)