; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=amdgcn -mcpu=gfx950 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX950 %s
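
; Shift both i32 lanes right by a shared amount (src2 masked to 31), clamp the
; results to the signed i8 range [-128, 127], and store the two packed bytes
; as an i16.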
define amdgpu_kernel void @v_ashr_pk_i8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
; GFX950-LABEL: v_ashr_pk_i8_i32:
; GFX950:       ; %bb.0:
; GFX950-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
; GFX950-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GFX950-NEXT:    v_mov_b32_e32 v1, 0xffffff80
; GFX950-NEXT:    v_mov_b32_e32 v2, 0x7f
; GFX950-NEXT:    v_mov_b32_e32 v0, 0
; GFX950-NEXT:    s_waitcnt lgkmcnt(0)
; GFX950-NEXT:    s_ashr_i32 s1, s1, s2
; GFX950-NEXT:    s_ashr_i32 s0, s0, s2
; GFX950-NEXT:    v_med3_i32 v3, s0, v1, v2
; GFX950-NEXT:    v_med3_i32 v1, s1, v1, v2
; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
; GFX950-NEXT:    v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX950-NEXT:    global_store_short v0, v1, s[6:7]
; GFX950-NEXT:    s_endpgm
  %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
  %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
  %src2.clamp = and i32 %src2, 31
  %insert.1 = insertelement <2 x i32> poison, i32 %src2.clamp, i64 0
  %src2.broadcast = shufflevector <2 x i32> %insert.1, <2 x i32> poison, <2 x i32> zeroinitializer
  %ashr = ashr <2 x i32> %build_vector, %src2.broadcast
  %sat.low = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %ashr, <2 x i32> <i32 -128, i32 -128>)
  %sat.hi = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %sat.low, <2 x i32> <i32 127, i32 127>)
  %trunc = trunc nsw <2 x i32> %sat.hi to <2 x i8>
  %ret = bitcast <2 x i8> %trunc to i16
  store i16 %ret, ptr addrspace(1) %out
  ret void
}

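; Same pattern as above, but clamped to the unsigned i8 range [0, 255] before
; packing.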
define amdgpu_kernel void @v_ashr_pk_u8_i32(ptr addrspace(1) %out, i32 %src0, i32 %src1, i32 %src2) #0 {
; GFX950-LABEL: v_ashr_pk_u8_i32:
; GFX950:       ; %bb.0:
; GFX950-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c
; GFX950-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
; GFX950-NEXT:    v_mov_b32_e32 v1, 0xff
; GFX950-NEXT:    v_mov_b32_e32 v0, 0
; GFX950-NEXT:    s_waitcnt lgkmcnt(0)
; GFX950-NEXT:    s_ashr_i32 s1, s1, s2
; GFX950-NEXT:    s_ashr_i32 s0, s0, s2
; GFX950-NEXT:    v_med3_i32 v2, s0, 0, v1
; GFX950-NEXT:    v_med3_i32 v1, s1, 0, v1
; GFX950-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
; GFX950-NEXT:    v_or_b32_e32 v1, v2, v1
; GFX950-NEXT:    global_store_short v0, v1, s[6:7]
; GFX950-NEXT:    s_endpgm
  %insert.0 = insertelement <2 x i32> poison, i32 %src0, i64 0
  %build_vector = insertelement <2 x i32> %insert.0, i32 %src1, i64 1
  %src2.clamp = and i32 %src2, 31
  %insert.1 = insertelement <2 x i32> poison, i32 %src2.clamp, i64 0
  %src2.broadcast = shufflevector <2 x i32> %insert.1, <2 x i32> poison, <2 x i32> zeroinitializer
  %ashr = ashr <2 x i32> %build_vector, %src2.broadcast
  %sat.low = tail call <2 x i32> @llvm.smax.v2i32(<2 x i32> %ashr, <2 x i32> <i32 0, i32 0>)
  %sat.hi = tail call <2 x i32> @llvm.smin.v2i32(<2 x i32> %sat.low, <2 x i32> <i32 255, i32 255>)
  %trunc = trunc nsw <2 x i32> %sat.hi to <2 x i8>
  %ret = bitcast <2 x i8> %trunc to i16
  store i16 %ret, ptr addrspace(1) %out
  ret void
}