; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ABS
;

define <vscale x 16 x i8> @abs_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
; CHECK-LABEL: abs_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8> %a,
                                                               <vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @abs_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: abs_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16> %a,
                                                               <vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @abs_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: abs_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32> %a,
                                                               <vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @abs_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: abs_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    abs z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64> %a,
                                                               <vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; NEG
;

define <vscale x 16 x i8> @neg_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
; CHECK-LABEL: neg_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8> %a,
                                                               <vscale x 16 x i1> %pg,
                                                               <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @neg_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: neg_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16> %a,
                                                               <vscale x 8 x i1> %pg,
                                                               <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @neg_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: neg_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32> %a,
                                                               <vscale x 4 x i1> %pg,
                                                               <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @neg_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: neg_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    neg z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64> %a,
                                                               <vscale x 2 x i1> %pg,
                                                               <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

; SDOT

define <vscale x 4 x i32> @sdot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: sdot_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdot z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdot.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 16 x i8> %b,
                                                                <vscale x 16 x i8> %c)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdot_i64(<vscale x 2 x i64> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: sdot_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdot z0.d, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdot.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 8 x i16> %b,
                                                                <vscale x 8 x i16> %c)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @test_sdot_i64_zero(<vscale x 2 x i64> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: test_sdot_i64_zero:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sdot z0.d, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %vdot1.i = call <vscale x 2 x i64> @llvm.aarch64.sve.sdot.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %ret = add <vscale x 2 x i64> %vdot1.i, %a
  ret <vscale x 2 x i64> %ret
}

define <vscale x 4 x i32> @test_sdot_i32_zero(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: test_sdot_i32_zero:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sdot z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
entry:
  %vdot1.i = call <vscale x 4 x i32> @llvm.aarch64.sve.sdot.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %ret = add <vscale x 4 x i32> %vdot1.i, %a
  ret <vscale x 4 x i32> %ret
}

; SDOT (Indexed)

define <vscale x 4 x i32> @sdot_lane_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: sdot_lane_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdot z0.s, z1.b, z2.b[2]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sdot.lane.nxv4i32(<vscale x 4 x i32> %a,
                                                                     <vscale x 16 x i8> %b,
                                                                     <vscale x 16 x i8> %c,
                                                                     i32 2)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sdot_lane_i64(<vscale x 2 x i64> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: sdot_lane_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sdot z0.d, z1.h, z2.h[1]
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sdot.lane.nxv2i64(<vscale x 2 x i64> %a,
                                                                     <vscale x 8 x i16> %b,
                                                                     <vscale x 8 x i16> %c,
                                                                     i32 1)
  ret <vscale x 2 x i64> %out
}

; SQADD

define <vscale x 16 x i8> @sqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sqadd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> %a,
                                                                   <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqadd_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqadd_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqadd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqadd z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

; SQSUB

define <vscale x 16 x i8> @sqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: sqsub_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8> %a,
                                                                   <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @sqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: sqsub_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: sqsub_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: sqsub_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sqsub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

; UDOT

define <vscale x 4 x i32> @udot_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: udot_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udot z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 16 x i8> %b,
                                                                <vscale x 16 x i8> %c)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @udot_i64(<vscale x 2 x i64> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: udot_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udot z0.d, z1.h, z2.h
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 8 x i16> %b,
                                                                <vscale x 8 x i16> %c)
  ret <vscale x 2 x i64> %out
}

define <vscale x 2 x i64> @test_udot_i64_zero(<vscale x 2 x i64> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
; CHECK-LABEL: test_udot_i64_zero:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    udot z0.d, z1.h, z2.h
; CHECK-NEXT:    ret
entry:
  %vdot1.i = call <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %ret = add <vscale x 2 x i64> %vdot1.i, %a
  ret <vscale x 2 x i64> %ret
}

define <vscale x 4 x i32> @test_udot_i32_zero(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: test_udot_i32_zero:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    udot z0.s, z1.b, z2.b
; CHECK-NEXT:    ret
entry:
  %vdot1.i = call <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %ret = add <vscale x 4 x i32> %vdot1.i, %a
  ret <vscale x 4 x i32> %ret
}

; UDOT (Indexed)

define <vscale x 4 x i32> @udot_lane_i32(<vscale x 4 x i32> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
; CHECK-LABEL: udot_lane_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    udot z0.s, z1.b, z2.b[2]
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.udot.lane.nxv4i32(<vscale x 4 x i32> %a,
                                                                     <vscale x 16 x i8> %b,
                                                                     <vscale x 16 x i8> %c,
                                                                     i32 2)
  ret <vscale x 4 x i32> %out
}

; UQADD

define <vscale x 16 x i8> @uqadd_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uqadd_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> %a,
                                                                   <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqadd_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqadd_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqadd_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqadd_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqadd z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

; UQSUB

define <vscale x 16 x i8> @uqsub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: uqsub_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.b, z0.b, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8> %a,
                                                                   <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @uqsub_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: uqsub_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16> %a,
                                                                   <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @uqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: uqsub_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.s, z0.s, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32> %a,
                                                                   <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @uqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: uqsub_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uqsub z0.d, z0.d, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64> %a,
                                                                   <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.abs.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.abs.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.abs.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.neg.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.neg.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.neg.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.neg.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.sdot.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sdot.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.sdot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sdot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.udot.lane.nxv4i32(<vscale x 4 x i32>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.udot.lane.nxv2i64(<vscale x 2 x i64>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)