; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s

; Each intrinsic below produces a predicate whose inactive lanes are known to
; be zero, so converting its result to svbool should be free: every function
; must lower to the single predicate-producing instruction, with no extra
; instructions emitted to zero the inactive lanes.

define <vscale x 16 x i1> @facgt_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: facgt_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    facgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @facge_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
; CHECK-LABEL: facge_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    facge p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilege_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilege p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilegt_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilegt p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilehi_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilehi p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilehs_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilehs p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilele_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilele_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilele p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilelo_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilelo_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilelo p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilels_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilels_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilels p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @whilelt_fun(i32 %a, i32 %b) {
; CHECK-LABEL: whilelt_fun:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    whilelt p0.d, w0, w1
; CHECK-NEXT:    ret
entry:
  %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
  %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0)
  ret <vscale x 16 x i1> %1
}

define <vscale x 16 x i1> @cmpeq_d_fun(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_d_fun:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpeq_wide_s_fun(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpeq_wide_s_fun:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpeq p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpge_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpge_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpge p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpgt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpgt_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpgt p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmphi_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphi_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphi p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmphs_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmphs_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmphs p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmple_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmple_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmple p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmplo_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplo_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplo p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpls_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpls_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpls p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmplt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmplt_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmplt p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @cmpne_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: cmpne_wide_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmpne p0.s, p0/z, z0.s, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @fcmeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmeq_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmeq p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @fcmgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmgt_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmgt p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @fcmne_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmne_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmne p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @fcmuo_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: fcmuo_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @match_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: match_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    match p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @nmatch_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: nmatch_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    nmatch p0.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1)
  ret <vscale x 16 x i1> %out
}

declare <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)