; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK,AVX

declare i64 @llvm.vector.reduce.or.v2i64(<2 x i64>)

define i1 @parseHeaders(ptr %ptr) nounwind {
; SSE2-LABEL: parseHeaders:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    movmskps %xmm1, %eax
; SSE2-NEXT:    xorl $15, %eax
; SSE2-NEXT:    sete %al
; SSE2-NEXT:    retq
;
; SSE41-LABEL: parseHeaders:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqu (%rdi), %xmm0
; SSE41-NEXT:    ptest %xmm0, %xmm0
; SSE41-NEXT:    sete %al
; SSE41-NEXT:    retq
;
; AVX-LABEL: parseHeaders:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vptest %xmm0, %xmm0
; AVX-NEXT:    sete %al
; AVX-NEXT:    retq
  %vload = load <2 x i64>, ptr %ptr, align 8
  %vreduce = call i64 @llvm.vector.reduce.or.v2i64(<2 x i64> %vload)
  %vcheck = icmp eq i64 %vreduce, 0
  ret i1 %vcheck
}

define i1 @parseHeaders2_scalar_or(ptr %ptr) nounwind {
; SSE2-LABEL: parseHeaders2_scalar_or:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqu (%rdi), %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE2-NEXT:    movmskps %xmm1, %eax
; SSE2-NEXT:    xorl $15, %eax
; SSE2-NEXT:    sete %al
; SSE2-NEXT:    retq
;
; SSE41-LABEL: parseHeaders2_scalar_or:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqu (%rdi), %xmm0
; SSE41-NEXT:    ptest %xmm0, %xmm0
; SSE41-NEXT:    sete %al
; SSE41-NEXT:    retq
;
; AVX-LABEL: parseHeaders2_scalar_or:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovdqu (%rdi), %xmm0
; AVX-NEXT:    vptest %xmm0, %xmm0
; AVX-NEXT:    sete %al
; AVX-NEXT:    retq
  %vload = load <2 x i64>, ptr %ptr, align 8
  %v1 = extractelement <2 x i64> %vload, i32 0
  %v2 = extractelement <2 x i64> %vload, i32 1
  %vreduce = or i64 %v1, %v2
  %vcheck = icmp eq i64 %vreduce, 0
  ret i1 %vcheck
}

define i1 @parseHeaders2_scalar_and(ptr %ptr) nounwind {
; CHECK-LABEL: parseHeaders2_scalar_and:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq (%rdi), %rax
; CHECK-NEXT:    testq %rax, 8(%rdi)
; CHECK-NEXT:    sete %al
; CHECK-NEXT:    retq
  %vload = load <2 x i64>, ptr %ptr, align 8
  %v1 = extractelement <2 x i64> %vload, i32 0
  %v2 = extractelement <2 x i64> %vload, i32 1
  %vreduce = and i64 %v1, %v2
  %vcheck = icmp eq i64 %vreduce, 0
  ret i1 %vcheck
}