; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64

; This test makes sure that the compiler does not crash with an
; assertion failure when trying to fold a vector shift left
; by immediate count if the type of the input vector is different
; from the result type.
;
; This happens, for example, when lowering a shift left of a MVT::v16i8 vector.
; This is custom lowered into the following sequence:
;     count = count << 5;
;     A = VSHLI(MVT::v8i16, r & (char16)15, 4)
;     B = BITCAST MVT::v16i8, A
;     r = VSELECT(r, B, count);
;     count += count;
;     C = VSHLI(MVT::v8i16, r & (char16)63, 2)
;     D = BITCAST MVT::v16i8, C
;     r = VSELECT(r, D, count);
;     count += count;
;     r = VSELECT(r, r+r, count);
;
; Where 'r' is a vector of type MVT::v16i8, and
; 'count' is the vector shift count.

define <16 x i8> @do_not_crash(ptr, ptr, ptr, i32, i64, i8) {
; X86-LABEL: do_not_crash:
; X86:       # %bb.0: # %entry
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movb %al, (%ecx)
; X86-NEXT:    movd %eax, %xmm1
; X86-NEXT:    psllq $56, %xmm1
; X86-NEXT:    pcmpeqd %xmm3, %xmm3
; X86-NEXT:    psllw $5, %xmm1
; X86-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT:    pxor %xmm2, %xmm2
; X86-NEXT:    pxor %xmm0, %xmm0
; X86-NEXT:    pcmpgtb %xmm1, %xmm0
; X86-NEXT:    pxor %xmm0, %xmm3
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    por %xmm3, %xmm0
; X86-NEXT:    paddb %xmm1, %xmm1
; X86-NEXT:    pxor %xmm3, %xmm3
; X86-NEXT:    pcmpgtb %xmm1, %xmm3
; X86-NEXT:    movdqa %xmm3, %xmm4
; X86-NEXT:    pandn %xmm0, %xmm4
; X86-NEXT:    psllw $2, %xmm0
; X86-NEXT:    pand %xmm3, %xmm0
; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT:    por %xmm4, %xmm0
; X86-NEXT:    paddb %xmm1, %xmm1
; X86-NEXT:    pcmpgtb %xmm1, %xmm2
; X86-NEXT:    movdqa %xmm2, %xmm1
; X86-NEXT:    pandn %xmm0, %xmm1
; X86-NEXT:    paddb %xmm0, %xmm0
; X86-NEXT:    pand %xmm2, %xmm0
; X86-NEXT:    por %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: do_not_crash:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movb %r9b, (%rdi)
; X64-NEXT:    movd %r9d, %xmm1
; X64-NEXT:    psllq $56, %xmm1
; X64-NEXT:    pcmpeqd %xmm2, %xmm2
; X64-NEXT:    psllw $5, %xmm1
; X64-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    pxor %xmm3, %xmm3
; X64-NEXT:    pxor %xmm0, %xmm0
; X64-NEXT:    pcmpgtb %xmm1, %xmm0
; X64-NEXT:    pxor %xmm0, %xmm2
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    por %xmm2, %xmm0
; X64-NEXT:    paddb %xmm1, %xmm1
; X64-NEXT:    pxor %xmm2, %xmm2
; X64-NEXT:    pcmpgtb %xmm1, %xmm2
; X64-NEXT:    movdqa %xmm2, %xmm4
; X64-NEXT:    pandn %xmm0, %xmm4
; X64-NEXT:    psllw $2, %xmm0
; X64-NEXT:    pand %xmm2, %xmm0
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-NEXT:    por %xmm4, %xmm0
; X64-NEXT:    paddb %xmm1, %xmm1
; X64-NEXT:    pcmpgtb %xmm1, %xmm3
; X64-NEXT:    movdqa %xmm3, %xmm1
; X64-NEXT:    pandn %xmm0, %xmm1
; X64-NEXT:    paddb %xmm0, %xmm0
; X64-NEXT:    pand %xmm3, %xmm0
; X64-NEXT:    por %xmm1, %xmm0
; X64-NEXT:    retq
entry:
  store i8 %5, ptr %0
  %L5 = load i8, ptr %0
  %I8 = insertelement <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, i8 %L5, i32 7
  %B51 = shl <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, %I8
  ret <16 x i8> %B51
}
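
; For reference, a minimal sketch of an input that takes the same custom
; v16i8 lowering path described in the comment above: a plain variable-count
; shift of a <16 x i8> vector, which SSE2 lowers through the count << 5 /
; pcmpgtb sign-bit select chain seen in the checks. This extra function is an
; illustrative assumption, not part of the original test, and carries no
; autogenerated assertions; the name @shl_v16i8_var is hypothetical.
define <16 x i8> @shl_v16i8_var(<16 x i8> %r, <16 x i8> %count) {
  %shl = shl <16 x i8> %r, %count
  ret <16 x i8> %shl
}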