; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -msan-check-access-address=0 -passes=msan 2>&1 | FileCheck %s
; RUN: opt %s -S -msan-check-access-address=0 -msan-track-origins=2 -passes=msan 2>&1 | FileCheck %s --check-prefixes=CHECK,ORIGIN

; NOTE(review): the datalayout uses macOS name mangling ("m:o") while the
; triple is x86_64-unknown-linux-gnu -- harmless for this test, but worth
; confirming against the original clang invocation that produced it.
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Integer abs, 32 x i8 lanes. The CHECK lines show MSan passes the operand
; shadow straight through llvm.abs: the shadow loaded from the parameter TLS
; slot is only bitcast on its way to the return-value TLS slot, never
; transformed by the abs itself. ORIGIN lines additionally check that the
; origin id is forwarded unchanged under -msan-track-origins=2.
define <4 x i64> @test_mm256_abs_epi8(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <32 x i8>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <32 x i8>
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> [[TMP3]], i1 false)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <32 x i8> [[TMP2]] to <4 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <32 x i8> [[TMP4]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x i64> [[TMP6]]
;
entry:
  %0 = bitcast <4 x i64> %a to <32 x i8>
  %1 = tail call <32 x i8> @llvm.abs.v32i8(<32 x i8> %0, i1 false)
  %2 = bitcast <32 x i8> %1 to <4 x i64>
  ret <4 x i64> %2
}
; Integer abs, 16 x i16 lanes. Same shadow-propagation pattern as the i8
; case: the parameter shadow is bitcast to the element type, bitcast back,
; and stored to the retval TLS slot untouched by the abs operation.
define <4 x i64> @test_mm256_abs_epi16(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi16(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <16 x i16>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <16 x i16>
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> [[TMP3]], i1 false)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <16 x i16> [[TMP2]] to <4 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <16 x i16> [[TMP4]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x i64> [[TMP6]]
;
entry:
  %0 = bitcast <4 x i64> %a to <16 x i16>
  %1 = tail call <16 x i16> @llvm.abs.v16i16(<16 x i16> %0, i1 false)
  %2 = bitcast <16 x i16> %1 to <4 x i64>
  ret <4 x i64> %2
}
; Integer abs, 8 x i32 lanes. Same shadow-propagation pattern as the i8 and
; i16 cases: the shadow is only bitcast around the abs, never modified.
define <4 x i64> @test_mm256_abs_epi32(<4 x i64> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_mm256_abs_epi32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i64> [[TMP0]] to <8 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x i64> [[A:%.*]] to <8 x i32>
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> [[TMP3]], i1 false)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <8 x i32> [[TMP2]] to <4 x i64>
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i32> [[TMP4]] to <4 x i64>
; CHECK-NEXT:    store <4 x i64> [[TMP5]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x i64> [[TMP6]]
;
entry:
  %0 = bitcast <4 x i64> %a to <8 x i32>
  %1 = tail call <8 x i32> @llvm.abs.v8i32(<8 x i32> %0, i1 false)
  %2 = bitcast <8 x i32> %1 to <4 x i64>
  ret <4 x i64> %2
}
; Floating-point abs. Here the CHECK lines show the parameter shadow
; ([[TMP0]]) is stored to the retval TLS slot directly -- no bitcasts are
; even needed, since the shadow of <4 x double> is already <4 x i64>.
define <4 x double> @test_fabs(<4 x double> %a) local_unnamed_addr #0 {
; CHECK-LABEL: @test_fabs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, ptr @__msan_param_tls, align 8
; ORIGIN-NEXT:   [[TMP1:%.*]] = load i32, ptr @__msan_param_origin_tls, align 4
; CHECK:         call void @llvm.donothing()
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> [[A:%.*]])
; CHECK-NEXT:    store <4 x i64> [[TMP0]], ptr @__msan_retval_tls, align 8
; ORIGIN-NEXT:   store i32 [[TMP1]], ptr @__msan_retval_origin_tls, align 4
; CHECK:         ret <4 x double> [[TMP2]]
;
entry:
  %0 = tail call <4 x double> @llvm.fabs.v4f64(<4 x double> %a)
  ret <4 x double> %0
}
declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1 immarg) #1
declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1 immarg) #1
declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1 immarg) #1
declare <4 x double> @llvm.fabs.v4f64(<4 x double>) #1

; sanitize_memory on #0 is what makes the msan pass instrument the
; functions above; #1 marks the intrinsic declarations.
attributes #0 = { nounwind readnone sanitize_memory }
attributes #1 = { nounwind readnone speculatable willreturn }

!llvm.module.flags = !{!0}
!llvm.ident = !{!1}

!0 = !{i32 1, !"wchar_size", i32 4}
!1 = !{!"clang version 12.0.0"}