; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86

declare i4 @llvm.sdiv.fix.sat.i4 (i4, i4, i32)
declare i15 @llvm.sdiv.fix.sat.i15 (i15, i15, i32)
declare i16 @llvm.sdiv.fix.sat.i16 (i16, i16, i32)
declare i18 @llvm.sdiv.fix.sat.i18 (i18, i18, i32)
declare i64 @llvm.sdiv.fix.sat.i64 (i64, i64, i32)
declare <4 x i32> @llvm.sdiv.fix.sat.v4i32(<4 x i32>, <4 x i32>, i32)

define i16 @func(i16 %x, i16 %y) nounwind {
;
; X64-LABEL: func:
; X64: # %bb.0:
; X64-NEXT: movswl %si, %esi
; X64-NEXT: movswl %di, %ecx
; X64-NEXT: shll $8, %ecx
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: cltd
; X64-NEXT: idivl %esi
; X64-NEXT: # kill: def $eax killed $eax def $rax
; X64-NEXT: leal -1(%rax), %edi
; X64-NEXT: testl %esi, %esi
; X64-NEXT: sets %sil
; X64-NEXT: testl %ecx, %ecx
; X64-NEXT: sets %cl
; X64-NEXT: xorb %sil, %cl
; X64-NEXT: testl %edx, %edx
; X64-NEXT: setne %dl
; X64-NEXT: testb %cl, %dl
; X64-NEXT: cmovnel %edi, %eax
; X64-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X64-NEXT: movl $65535, %ecx # imm = 0xFFFF
; X64-NEXT: cmovgel %ecx, %eax
; X64-NEXT: cmpl $-65535, %eax # imm = 0xFFFF0001
; X64-NEXT: movl $-65536, %ecx # imm = 0xFFFF0000
; X64-NEXT: cmovll %ecx, %eax
; X64-NEXT: shrl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $rax
; X64-NEXT: retq
;
; X86-LABEL: func:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movswl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll $8, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %esi
; X86-NEXT: leal -1(%eax), %edi
; X86-NEXT: testl %esi, %esi
; X86-NEXT: sets %bl
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: sets %cl
; X86-NEXT: xorb %bl, %cl
; X86-NEXT: testl %edx, %edx
; X86-NEXT: setne %dl
; X86-NEXT: testb %cl, %dl
; X86-NEXT: cmovnel %edi, %eax
; X86-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-NEXT: movl $65535, %ecx # imm = 0xFFFF
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: cmpl $-65535, %eax # imm = 0xFFFF0001
; X86-NEXT: movl $-65536, %ecx # imm = 0xFFFF0000
; X86-NEXT: cmovll %ecx, %eax
; X86-NEXT: shrl %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %tmp = call i16 @llvm.sdiv.fix.sat.i16(i16 %x, i16 %y, i32 7)
  ret i16 %tmp
}

define i16 @func2(i8 %x, i8 %y) nounwind {
;
; X64-LABEL: func2:
; X64: # %bb.0:
; X64-NEXT: movsbl %sil, %esi
; X64-NEXT: movsbl %dil, %ecx
; X64-NEXT: shll $14, %ecx
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: cltd
; X64-NEXT: idivl %esi
; X64-NEXT: # kill: def $eax killed $eax def $rax
; X64-NEXT: leal -1(%rax), %edi
; X64-NEXT: testl %esi, %esi
; X64-NEXT: sets %sil
; X64-NEXT: testl %ecx, %ecx
; X64-NEXT: sets %cl
; X64-NEXT: xorb %sil, %cl
; X64-NEXT: testl %edx, %edx
; X64-NEXT: setne %dl
; X64-NEXT: testb %cl, %dl
; X64-NEXT: cmovnel %edi, %eax
; X64-NEXT: cmpl $16383, %eax # imm = 0x3FFF
; X64-NEXT: movl $16383, %ecx # imm = 0x3FFF
; X64-NEXT: cmovgel %ecx, %eax
; X64-NEXT: cmpl $-16383, %eax # imm = 0xC001
; X64-NEXT: movl $-16384, %ecx # imm = 0xC000
; X64-NEXT: cmovll %ecx, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $rax
; X64-NEXT: retq
;
; X86-LABEL: func2:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movsbl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movsbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll $14, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %esi
; X86-NEXT: leal -1(%eax), %edi
; X86-NEXT: testl %esi, %esi
; X86-NEXT: sets %bl
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: sets %cl
; X86-NEXT: xorb %bl, %cl
; X86-NEXT: testl %edx, %edx
; X86-NEXT: setne %dl
; X86-NEXT: testb %cl, %dl
; X86-NEXT: cmovnel %edi, %eax
; X86-NEXT: cmpl $16383, %eax # imm = 0x3FFF
; X86-NEXT: movl $16383, %ecx # imm = 0x3FFF
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: cmpl $-16383, %eax # imm = 0xC001
; X86-NEXT: movl $-16384, %ecx # imm = 0xC000
; X86-NEXT: cmovll %ecx, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %x2 = sext i8 %x to i15
  %y2 = sext i8 %y to i15
  %tmp = call i15 @llvm.sdiv.fix.sat.i15(i15 %x2, i15 %y2, i32 14)
  %tmp2 = sext i15 %tmp to i16
  ret i16 %tmp2
}

define i16 @func3(i15 %x, i8 %y) nounwind {
;
; X64-LABEL: func3:
; X64: # %bb.0:
; X64-NEXT: shll $8, %esi
; X64-NEXT: movswl %si, %ecx
; X64-NEXT: addl %edi, %edi
; X64-NEXT: shrl $4, %ecx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cwtd
; X64-NEXT: idivw %cx
; X64-NEXT: # kill: def $ax killed $ax def $rax
; X64-NEXT: leal -1(%rax), %esi
; X64-NEXT: testw %di, %di
; X64-NEXT: sets %dil
; X64-NEXT: testw %cx, %cx
; X64-NEXT: sets %cl
; X64-NEXT: xorb %dil, %cl
; X64-NEXT: testw %dx, %dx
; X64-NEXT: setne %dl
; X64-NEXT: testb %cl, %dl
; X64-NEXT: cmovnel %esi, %eax
; X64-NEXT: movswl %ax, %ecx
; X64-NEXT: cmpl $16383, %ecx # imm = 0x3FFF
; X64-NEXT: movl $16383, %ecx # imm = 0x3FFF
; X64-NEXT: cmovgel %ecx, %eax
; X64-NEXT: movswl %ax, %ecx
; X64-NEXT: cmpl $-16383, %ecx # imm = 0xC001
; X64-NEXT: movl $49152, %ecx # imm = 0xC000
; X64-NEXT: cmovll %ecx, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $rax
; X64-NEXT: retq
;
; X86-LABEL: func3:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $8, %eax
; X86-NEXT: movswl %ax, %esi
; X86-NEXT: addl %ecx, %ecx
; X86-NEXT: shrl $4, %esi
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: cwtd
; X86-NEXT: idivw %si
; X86-NEXT: # kill: def $ax killed $ax def $eax
; X86-NEXT: leal -1(%eax), %edi
; X86-NEXT: testw %cx, %cx
; X86-NEXT: sets %cl
; X86-NEXT: testw %si, %si
; X86-NEXT: sets %ch
; X86-NEXT: xorb %cl, %ch
; X86-NEXT: testw %dx, %dx
; X86-NEXT: setne %cl
; X86-NEXT: testb %ch, %cl
; X86-NEXT: cmovnel %edi, %eax
; X86-NEXT: movswl %ax, %ecx
; X86-NEXT: cmpl $16383, %ecx # imm = 0x3FFF
; X86-NEXT: movl $16383, %ecx # imm = 0x3FFF
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: movswl %ax, %ecx
; X86-NEXT: cmpl $-16383, %ecx # imm = 0xC001
; X86-NEXT: movl $49152, %ecx # imm = 0xC000
; X86-NEXT: cmovll %ecx, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
  %y2 = sext i8 %y to i15
  %y3 = shl i15 %y2, 7
  %tmp = call i15 @llvm.sdiv.fix.sat.i15(i15 %x, i15 %y3, i32 4)
  %tmp2 = sext i15 %tmp to i16
  ret i16 %tmp2
}

define i4 @func4(i4 %x, i4 %y) nounwind {
;
; X64-LABEL: func4:
; X64: # %bb.0:
; X64-NEXT: shlb $4, %sil
; X64-NEXT: sarb $4, %sil
; X64-NEXT: shlb $4, %dil
; X64-NEXT: sarb $4, %dil
; X64-NEXT: shlb $2, %dil
; X64-NEXT: movsbl %dil, %ecx
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %edx
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: leal -1(%rax), %edi
; X64-NEXT: movzbl %dil, %edi
; X64-NEXT: testb %sil, %sil
; X64-NEXT: sets %sil
; X64-NEXT: testb %cl, %cl
; X64-NEXT: sets %cl
; X64-NEXT: xorb %sil, %cl
; X64-NEXT: testb %dl, %dl
; X64-NEXT: setne %dl
; X64-NEXT: testb %cl, %dl
; X64-NEXT: cmovel %eax, %edi
; X64-NEXT: cmpb $7, %dil
; X64-NEXT: movl $7, %ecx
; X64-NEXT: cmovll %edi, %ecx
; X64-NEXT: cmpb $-7, %cl
; X64-NEXT: movl $248, %eax
; X64-NEXT: cmovgel %ecx, %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
;
; X86-LABEL: func4:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shlb $4, %cl
; X86-NEXT: sarb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT: shlb $4, %dl
; X86-NEXT: sarb $4, %dl
; X86-NEXT: shlb $2, %dl
; X86-NEXT: movsbl %dl, %eax
; X86-NEXT: idivb %cl
; X86-NEXT: movsbl %ah, %ebx
; X86-NEXT: movzbl %al, %esi
; X86-NEXT: decb %al
; X86-NEXT: movzbl %al, %eax
; X86-NEXT: testb %cl, %cl
; X86-NEXT: sets %cl
; X86-NEXT: testb %dl, %dl
; X86-NEXT: sets %dl
; X86-NEXT: xorb %cl, %dl
; X86-NEXT: testb %bl, %bl
; X86-NEXT: setne %cl
; X86-NEXT: testb %dl, %cl
; X86-NEXT: cmovel %esi, %eax
; X86-NEXT: cmpb $7, %al
; X86-NEXT: movl $7, %ecx
; X86-NEXT: cmovll %eax, %ecx
; X86-NEXT: cmpb $-7, %cl
; X86-NEXT: movl $248, %eax
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %tmp = call i4 @llvm.sdiv.fix.sat.i4(i4 %x, i4 %y, i32 2)
  ret i4 %tmp
}

define i64 @func5(i64 %x, i64 %y) nounwind {
;
; X64-LABEL: func5:
; X64: # %bb.0:
; X64-NEXT: pushq %rbp
; X64-NEXT: pushq %r15
; X64-NEXT: pushq %r14
; X64-NEXT: pushq %r13
; X64-NEXT: pushq %r12
; X64-NEXT: pushq %rbx
; X64-NEXT: subq $24, %rsp
; X64-NEXT: movq %rsi, %rdx
; X64-NEXT: movq %rsi, (%rsp) # 8-byte Spill
; X64-NEXT: movq %rdi, %r14
; X64-NEXT: leaq (%rdi,%rdi), %rax
; X64-NEXT: movq %rdi, %r15
; X64-NEXT: sarq $63, %r15
; X64-NEXT: shldq $31, %rax, %r15
; X64-NEXT: shlq $32, %r14
; X64-NEXT: movq %rsi, %r12
; X64-NEXT: sarq $63, %r12
; X64-NEXT: movq %r14, %rdi
; X64-NEXT: movq %r15, %rsi
; X64-NEXT: movq %r12, %rcx
; X64-NEXT: callq __divti3@PLT
; X64-NEXT: movq %rax, %r13
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %rbp
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: subq $1, %r13
; X64-NEXT: sbbq $0, %rbp
; X64-NEXT: testq %r15, %r15
; X64-NEXT: sets %al
; X64-NEXT: testq %r12, %r12
; X64-NEXT: sets %bl
; X64-NEXT: xorb %al, %bl
; X64-NEXT: movq %r14, %rdi
; X64-NEXT: movq %r15, %rsi
; X64-NEXT: movq (%rsp), %rdx # 8-byte Reload
; X64-NEXT: movq %r12, %rcx
; X64-NEXT: callq __modti3@PLT
; X64-NEXT: orq %rax, %rdx
; X64-NEXT: setne %al
; X64-NEXT: testb %bl, %al
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
; X64-NEXT: movq %rbp, %rcx
; X64-NEXT: sarq $63, %rcx
; X64-NEXT: andq %rbp, %rcx
; X64-NEXT: testq %rbp, %rbp
; X64-NEXT: movq $-1, %rdx
; X64-NEXT: cmovgq %rdx, %r13
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpq $-1, %rcx
; X64-NEXT: cmovlq %rdx, %rcx
; X64-NEXT: cmovgeq %r13, %rax
; X64-NEXT: shrdq $1, %rcx, %rax
; X64-NEXT: addq $24, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %r12
; X64-NEXT: popq %r13
; X64-NEXT: popq %r14
; X64-NEXT: popq %r15
; X64-NEXT: popq %rbp
; X64-NEXT: retq
;
; X86-LABEL: func5:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $96, %esp
; X86-NEXT: movl 8(%ebp), %ecx
; X86-NEXT: movl 12(%ebp), %eax
; X86-NEXT: movl 20(%ebp), %esi
; X86-NEXT: movl %esi, %ebx
; X86-NEXT: sarl $31, %ebx
; X86-NEXT: movl %eax, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shldl $31, %eax, %edi
; X86-NEXT: shldl $31, %ecx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shll $31, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %esi
; X86-NEXT: pushl 16(%ebp)
; X86-NEXT: pushl %edx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %eax
; X86-NEXT: pushl %ecx
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %eax
; X86-NEXT: calll __divti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: subl $1, %esi
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: sets %al
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: sets %dl
; X86-NEXT: xorb %al, %dl
; X86-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl 20(%ebp)
; X86-NEXT: pushl 16(%ebp)
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: pushl %eax
; X86-NEXT: calll __modti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: setne %al
; X86-NEXT: testb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: cmpl $-1, %esi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: sbbl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-NEXT: cmovll %eax, %edx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: cmovgel %ecx, %edi
; X86-NEXT: movl %edi, %eax
; X86-NEXT: cmovgel %ecx, %ebx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: cmovgel %ecx, %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: negl %edi
; X86-NEXT: movl $-2147483648, %edi # imm = 0x80000000
; X86-NEXT: sbbl %edx, %edi
; X86-NEXT: movl $-1, %edi
; X86-NEXT: sbbl %ebx, %edi
; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: movl $0, %eax
; X86-NEXT: cmovgel %eax, %esi
; X86-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-NEXT: cmovgel %eax, %edx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
  %tmp = call i64 @llvm.sdiv.fix.sat.i64(i64 %x, i64 %y, i32 31)
  ret i64 %tmp
}

define i18 @func6(i16 %x, i16 %y) nounwind {
;
; X64-LABEL: func6:
; X64: # %bb.0:
; X64-NEXT: movswl %di, %ecx
; X64-NEXT: movswl %si, %esi
; X64-NEXT: shll $7, %ecx
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: cltd
; X64-NEXT: idivl %esi
; X64-NEXT: # kill: def $eax killed $eax def $rax
; X64-NEXT: leal -1(%rax), %edi
; X64-NEXT: testl %esi, %esi
; X64-NEXT: sets %sil
; X64-NEXT: testl %ecx, %ecx
; X64-NEXT: sets %cl
; X64-NEXT: xorb %sil, %cl
; X64-NEXT: testl %edx, %edx
; X64-NEXT: setne %dl
; X64-NEXT: testb %cl, %dl
; X64-NEXT: cmovnel %edi, %eax
; X64-NEXT: cmpl $131071, %eax # imm = 0x1FFFF
; X64-NEXT: movl $131071, %ecx # imm = 0x1FFFF
; X64-NEXT: cmovgel %ecx, %eax
; X64-NEXT: cmpl $-131071, %eax # imm = 0xFFFE0001
; X64-NEXT: movl $-131072, %ecx # imm = 0xFFFE0000
; X64-NEXT: cmovll %ecx, %eax
; X64-NEXT: # kill: def $eax killed $eax killed $rax
; X64-NEXT: retq
;
; X86-LABEL: func6:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movswl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll $7, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %esi
; X86-NEXT: leal -1(%eax), %edi
; X86-NEXT: testl %esi, %esi
; X86-NEXT: sets %bl
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: sets %cl
; X86-NEXT: xorb %bl, %cl
; X86-NEXT: testl %edx, %edx
; X86-NEXT: setne %dl
; X86-NEXT: testb %cl, %dl
; X86-NEXT: cmovnel %edi, %eax
; X86-NEXT: cmpl $131071, %eax # imm = 0x1FFFF
; X86-NEXT: movl $131071, %ecx # imm = 0x1FFFF
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: cmpl $-131071, %eax # imm = 0xFFFE0001
; X86-NEXT: movl $-131072, %ecx # imm = 0xFFFE0000
; X86-NEXT: cmovll %ecx, %eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
  %x2 = sext i16 %x to i18
  %y2 = sext i16 %y to i18
  %tmp = call i18 @llvm.sdiv.fix.sat.i18(i18 %x2, i18 %y2, i32 7)
  ret i18 %tmp
}

define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
;
; X64-LABEL: vec:
; X64: # %bb.0:
; X64-NEXT: pushq %rbp
; X64-NEXT: pushq %r15
; X64-NEXT: pushq %r14
; X64-NEXT: pushq %r13
; X64-NEXT: pushq %r12
; X64-NEXT: pushq %rbx
; X64-NEXT: subq $120, %rsp
; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: pxor %xmm2, %xmm2
; X64-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; X64-NEXT: psrlq $31, %xmm2
; X64-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
; X64-NEXT: psrad $31, %xmm2
; X64-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X64-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: movq %xmm3, %rbx
; X64-NEXT: movq %rbx, %r13
; X64-NEXT: sarq $63, %r13
; X64-NEXT: shldq $31, %rbx, %r13
; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: pcmpgtd %xmm1, %xmm0
; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: movq %xmm1, %rdx
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r15
; X64-NEXT: sarq $63, %r15
; X64-NEXT: movq %rbx, %r12
; X64-NEXT: shlq $31, %r12
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __divti3@PLT
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: subq $1, %rbp
; X64-NEXT: sbbq $0, %r14
; X64-NEXT: shrq $63, %rbx
; X64-NEXT: xorl %r15d, %ebx
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __modti3@PLT
; X64-NEXT: orq %rax, %rdx
; X64-NEXT: setne %al
; X64-NEXT: testb %bl, %al
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: movl $4294967295, %edx # imm = 0xFFFFFFFF
; X64-NEXT: cmpq %rdx, %rbp
; X64-NEXT: movq %r14, %rax
; X64-NEXT: sbbq $0, %rax
; X64-NEXT: cmovgeq %rcx, %r14
; X64-NEXT: cmovgeq %rdx, %rbp
; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
; X64-NEXT: cmpq %rbp, %rcx
; X64-NEXT: movq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sbbq %r14, %rax
; X64-NEXT: cmovgeq %rcx, %rbp
; X64-NEXT: movq %rbp, %xmm0
; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X64-NEXT: # xmm0 = mem[2,3,2,3]
; X64-NEXT: movq %xmm0, %rbx
; X64-NEXT: movq %rbx, %r13
; X64-NEXT: sarq $63, %r13
; X64-NEXT: shldq $31, %rbx, %r13
; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X64-NEXT: # xmm0 = mem[2,3,2,3]
; X64-NEXT: movq %xmm0, %rdx
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r15
; X64-NEXT: sarq $63, %r15
; X64-NEXT: movq %rbx, %r12
; X64-NEXT: shlq $31, %r12
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __divti3@PLT
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: subq $1, %rbp
; X64-NEXT: sbbq $0, %r14
; X64-NEXT: shrq $63, %rbx
; X64-NEXT: xorl %r15d, %ebx
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __modti3@PLT
; X64-NEXT: orq %rax, %rdx
; X64-NEXT: setne %al
; X64-NEXT: testb %bl, %al
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
; X64-NEXT: cmpq %rcx, %rbp
; X64-NEXT: movq %r14, %rax
; X64-NEXT: sbbq $0, %rax
; X64-NEXT: movl $0, %eax
; X64-NEXT: cmovgeq %rax, %r14
; X64-NEXT: cmovgeq %rcx, %rbp
; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
; X64-NEXT: cmpq %rbp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sbbq %r14, %rax
; X64-NEXT: cmovgeq %rcx, %rbp
; X64-NEXT: movq %rbp, %xmm0
; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-NEXT: psrlq $1, %xmm1
; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: pxor %xmm0, %xmm0
; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: psrlq $31, %xmm0
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: psrad $31, %xmm1
; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: movq %xmm0, %rbx
; X64-NEXT: movq %rbx, %r13
; X64-NEXT: sarq $63, %r13
; X64-NEXT: shldq $31, %rbx, %r13
; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT: pxor %xmm1, %xmm1
; X64-NEXT: pcmpgtd %xmm0, %xmm1
; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: movq %xmm0, %rdx
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r15
; X64-NEXT: sarq $63, %r15
; X64-NEXT: movq %rbx, %r12
; X64-NEXT: shlq $31, %r12
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __divti3@PLT
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: subq $1, %rbp
; X64-NEXT: sbbq $0, %r14
; X64-NEXT: shrq $63, %rbx
; X64-NEXT: xorl %r15d, %ebx
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __modti3@PLT
; X64-NEXT: orq %rax, %rdx
; X64-NEXT: setne %al
; X64-NEXT: testb %bl, %al
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
; X64-NEXT: cmpq %rcx, %rbp
; X64-NEXT: movq %r14, %rax
; X64-NEXT: sbbq $0, %rax
; X64-NEXT: movl $0, %eax
; X64-NEXT: cmovgeq %rax, %r14
; X64-NEXT: cmovgeq %rcx, %rbp
; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
; X64-NEXT: cmpq %rbp, %rcx
; X64-NEXT: movq $-1, %rax
; X64-NEXT: sbbq %r14, %rax
; X64-NEXT: cmovgeq %rcx, %rbp
; X64-NEXT: movq %rbp, %xmm0
; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X64-NEXT: # xmm0 = mem[2,3,2,3]
; X64-NEXT: movq %xmm0, %rbx
; X64-NEXT: movq %rbx, %r13
; X64-NEXT: sarq $63, %r13
; X64-NEXT: shldq $31, %rbx, %r13
; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X64-NEXT: # xmm0 = mem[2,3,2,3]
; X64-NEXT: movq %xmm0, %rdx
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r15
; X64-NEXT: sarq $63, %r15
; X64-NEXT: movq %rbx, %r12
; X64-NEXT: shlq $31, %r12
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __divti3@PLT
; X64-NEXT: movq %rax, %rbp
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: movq %rdx, %r14
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; X64-NEXT: subq $1, %rbp
; X64-NEXT: sbbq $0, %r14
; X64-NEXT: shrq $63, %rbx
; X64-NEXT: xorl %r15d, %ebx
; X64-NEXT: movq %r12, %rdi
; X64-NEXT: movq %r13, %rsi
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; X64-NEXT: movq %r15, %rcx
; X64-NEXT: callq __modti3@PLT
; X64-NEXT: orq %rax, %rdx
; X64-NEXT: setne %al
; X64-NEXT: testb %bl, %al
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF
; X64-NEXT: cmpq %rcx, %rbp
; X64-NEXT: movq %r14, %rax
; X64-NEXT: sbbq $0, %rax
; X64-NEXT: movl $0, %eax
; X64-NEXT: cmovgeq %rax, %r14
; X64-NEXT: cmovgeq %rcx, %rbp
; X64-NEXT: movabsq $-4294967296, %rax # imm = 0xFFFFFFFF00000000
; X64-NEXT: cmpq %rbp, %rax
; X64-NEXT: sbbq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; X64-NEXT: cmovgeq %rax, %rbp
; X64-NEXT: movq %rbp, %xmm1
; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X64-NEXT: psrlq $1, %xmm0
; X64-NEXT: shufps $136, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X64-NEXT: # xmm0 = xmm0[0,2],mem[0,2]
; X64-NEXT: addq $120, %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %r12
; X64-NEXT: popq %r13
; X64-NEXT: popq %r14
; X64-NEXT: popq %r15
; X64-NEXT: popq %rbp
; X64-NEXT: retq
;
; X86-LABEL: vec:
; X86: # %bb.0:
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-16, %esp
; X86-NEXT: subl $208, %esp
; X86-NEXT: movl 36(%ebp), %esi
; X86-NEXT: movl 16(%ebp), %ebx
; X86-NEXT: movl 32(%ebp), %eax
; X86-NEXT: movl %eax, %edi
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: sarl $31, %edi
; X86-NEXT: movl %ebx, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: leal (%ebx,%ebx), %eax
; X86-NEXT: shrl $31, %ebx
; X86-NEXT: shldl $31, %eax, %ebx
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl %edx
; X86-NEXT: pushl %edx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl $0
; X86-NEXT: pushl %eax
; X86-NEXT: calll __modti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movl 20(%ebp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarl $31, %eax
; X86-NEXT: leal (%ecx,%ecx), %edx
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: shldl $31, %edx, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
; X86-NEXT: pushl %esi
; X86-NEXT: pushl %esi
; X86-NEXT: pushl %esi
; X86-NEXT: pushl 36(%ebp)
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %eax
; X86-NEXT: pushl %eax
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl $0
; X86-NEXT: pushl %edx
; X86-NEXT: calll __divti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl 32(%ebp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl $0
; X86-NEXT: pushl %eax
; X86-NEXT: calll __divti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %esi
; X86-NEXT: pushl %esi
; X86-NEXT: pushl %esi
; X86-NEXT: pushl 36(%ebp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: pushl $0
; X86-NEXT: pushl %eax
; X86-NEXT: calll __modti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: movl 28(%ebp), %edx
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: sarl $31, %ebx
; X86-NEXT: movl 12(%ebp), %ecx
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: sarl $31, %edi
; X86-NEXT: leal (%ecx,%ecx), %eax
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: shldl $31, %eax, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl $0
; X86-NEXT: pushl %eax
; X86-NEXT: calll __modti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: movl 40(%ebp), %esi
; X86-NEXT: sarl $31, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl 24(%ebp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarl $31, %eax
; X86-NEXT: leal (%ecx,%ecx), %edx
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: shldl $31, %edx, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: leal {{[0-9]+}}(%esp), %edx
; X86-NEXT: pushl %esi
; X86-NEXT: pushl %esi
; X86-NEXT: pushl %esi
; X86-NEXT: pushl 40(%ebp)
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: pushl %eax
; X86-NEXT: pushl %eax
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl $0
; X86-NEXT: pushl %edx
; X86-NEXT: calll __divti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl 28(%ebp)
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %edi
; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: pushl $0
; X86-NEXT: pushl %eax
; X86-NEXT: calll __divti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: subl $1, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %edx
; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: sets %bl
; X86-NEXT: testl %edi, %edi
; X86-NEXT: sets %bh
; X86-NEXT: xorb %bl, %bh
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: orl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
; X86-NEXT: orl %edi, %esi
; X86-NEXT: setne %bl
; X86-NEXT: testb %bh, %bl
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: cmpl $-1, %eax
; X86-NEXT: movl %edi, %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl %edx, %esi
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: cmovgel %ebx, %edx
; X86-NEXT: cmovgel %ebx, %ecx
; X86-NEXT: cmovgel %ebx, %edi
; X86-NEXT: movl $-1, %esi
; X86-NEXT: cmovgel %esi, %eax
; X86-NEXT: movl %eax, %esi
; X86-NEXT: negl %esi
; X86-NEXT: movl $-1, %esi
; X86-NEXT: sbbl %edi, %esi
; X86-NEXT: movl $-1, %esi
; X86-NEXT: sbbl %ecx, %esi
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: cmovgel %ebx, %eax
; X86-NEXT: movl $-1, %edx
; X86-NEXT: cmovgel %edx, %edi
; X86-NEXT: shldl $31, %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: subl $1, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %edx
; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: sets %bl
; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: sets %bh
; X86-NEXT: xorb %bl, %bh
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
; X86-NEXT: orl %ecx, %esi
; X86-NEXT: setne %cl
; X86-NEXT: testb %bh, %cl
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: cmpl $-1, %eax
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: cmovgel %ecx, %edx
; X86-NEXT: cmovgel %ecx, %edi
; X86-NEXT: cmovgel %ecx, %esi
; X86-NEXT: movl $-1, %ebx
; X86-NEXT: cmovgel %ebx, %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: negl %ecx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %edi, %ecx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: cmovgel %ebx, %esi
; X86-NEXT: shldl $31, %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: subl $1, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %edx
; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: sets %bl
; X86-NEXT: cmpl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: sets %bh
; X86-NEXT: xorb %bl, %bh
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
; X86-NEXT: orl %ecx, %esi
; X86-NEXT: setne %cl
; X86-NEXT: testb %bh, %cl
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: cmpl $-1, %eax
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %edi, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: sbbl $0, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: cmovgel %ecx, %edx
; X86-NEXT: cmovgel %ecx, %edi
; X86-NEXT: cmovgel %ecx, %ebx
; X86-NEXT: movl $-1, %esi
; X86-NEXT: cmovgel %esi, %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: negl %ecx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %edi, %ecx
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: cmovgel %ecx, %eax
; X86-NEXT: cmovgel %esi, %ebx
; X86-NEXT: shldl $31, %eax, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: subl $1, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl $0, %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: sets %al
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: testl %edx, %edx
; X86-NEXT: sets %ah
; X86-NEXT: xorb %al, %ah
; X86-NEXT: movb %ah, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl %ecx
; X86-NEXT: pushl 40(%ebp)
; X86-NEXT: pushl %edx
; X86-NEXT: pushl %edx
; X86-NEXT: pushl {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: pushl $0
; X86-NEXT: pushl %eax
; X86-NEXT: calll __modti3
; X86-NEXT: addl $32, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: setne %al
; X86-NEXT: testb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT: cmpl $-1, %ebx
; X86-NEXT: movl %edi, %eax
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl %esi, %eax
; X86-NEXT: sbbl $0, %eax
; X86-NEXT: movl $0, %eax
; X86-NEXT: cmovgel %eax, %esi
; X86-NEXT: cmovgel %eax, %ecx
; X86-NEXT: cmovgel %eax, %edi
; X86-NEXT: movl $-1, %edx
; X86-NEXT: cmovgel %edx, %ebx
; X86-NEXT: movl %ebx, %eax
; X86-NEXT: negl %eax
; X86-NEXT: movl $-1, %eax
; X86-NEXT: sbbl %edi, %eax
; X86-NEXT: movl $-1, %eax
; X86-NEXT: sbbl %ecx, %eax
; X86-NEXT: movl $-1, %eax
; X86-NEXT: sbbl %esi, %eax
; X86-NEXT: movl $0, %eax
; X86-NEXT: cmovgel %eax, %ebx
; X86-NEXT: cmovgel %edx, %edi
; X86-NEXT: shldl $31, %ebx, %edi
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 8(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, 4(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, (%eax)
; X86-NEXT: leal -12(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
  %tmp = call <4 x i32> @llvm.sdiv.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 31)
  ret <4 x i32> %tmp
}