; RUN: opt < %s -dfsan -S | FileCheck %s --check-prefixes=CHECK,LEGACY
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-event-callbacks=true -S | FileCheck %s --check-prefixes=CHECK,EVENT_CALLBACKS
; RUN: opt < %s -dfsan -dfsan-args-abi -S | FileCheck %s --check-prefixes=CHECK,ARGS_ABI
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -S | FileCheck %s --check-prefixes=CHECK,FAST16
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-combine-pointer-labels-on-load=false -S | FileCheck %s --check-prefixes=CHECK,NO_COMBINE_LOAD_PTR
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-combine-pointer-labels-on-store=true -S | FileCheck %s --check-prefixes=CHECK,COMBINE_STORE_PTR
; RUN: opt < %s -dfsan -dfsan-fast-16-labels=true -dfsan-debug-nonzero-labels -S | FileCheck %s --check-prefixes=CHECK,DEBUG_NONZERO_LABELS
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]

define [4 x i8] @pass_array([4 x i8] %a) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$pass_array"
  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] %1, [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]

  ; ARGS_ABI: @"dfs$pass_array"
  ; ARGS_ABI: ret { [4 x i8], i[[#SBITS]] }

  ; DEBUG_NONZERO_LABELS: @"dfs$pass_array"
  ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
  ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 0
  ; DEBUG_NONZERO_LABELS: [[L1:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 1
  ; DEBUG_NONZERO_LABELS: [[L01:%.*]] = or i[[#SBITS]] [[L0]], [[L1]]
  ; DEBUG_NONZERO_LABELS: [[L2:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 2
  ; DEBUG_NONZERO_LABELS: [[L012:%.*]] = or i[[#SBITS]] [[L01]], [[L2]]
  ; DEBUG_NONZERO_LABELS: [[L3:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 3
  ; DEBUG_NONZERO_LABELS: [[L0123:%.*]] = or i[[#SBITS]] [[L012]], [[L3]]
  ; DEBUG_NONZERO_LABELS: {{.*}} = icmp ne i[[#SBITS]] [[L0123]], 0
  ; DEBUG_NONZERO_LABELS: call void @__dfsan_nonzero_label()

  ret [4 x i8] %a
}

%ArrayOfStruct = type [4 x {i8*, i32}]

define %ArrayOfStruct @pass_array_of_struct(%ArrayOfStruct %as) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$pass_array_of_struct"
  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x { i[[#SBITS]], i[[#SBITS]] }], [4 x { i[[#SBITS]], i[[#SBITS]] }]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x { i[[#SBITS]], i[[#SBITS]] }]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: store [4 x { i[[#SBITS]], i[[#SBITS]] }] %1, [4 x { i[[#SBITS]], i[[#SBITS]] }]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x { i[[#SBITS]], i[[#SBITS]] }]*), align [[ALIGN]]

  ; ARGS_ABI: @"dfs$pass_array_of_struct"
  ; ARGS_ABI: ret { [4 x { i8*, i32 }], i[[#SBITS]] }
  ret %ArrayOfStruct %as
}

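; The functions below check how array shadows are propagated through allocas,
; loads of arrays of various sizes, and extractvalue/insertvalue on arrays.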
define [4 x i1]* @alloca_ret_array() {
  ; NO_COMBINE_LOAD_PTR: @"dfs$alloca_ret_array"
  ; NO_COMBINE_LOAD_PTR: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
  %p = alloca [4 x i1]
  ret [4 x i1]* %p
}

define [4 x i1] @load_alloca_array() {
  ; NO_COMBINE_LOAD_PTR-LABEL: @"dfs$load_alloca_array"
  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R:]] = alloca i[[#SBITS]], align [[#SBYTES]]
  ; NO_COMBINE_LOAD_PTR-NEXT: %p = alloca [4 x i1]
  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+1]] = load i[[#SBITS]], i[[#SBITS]]* %[[#R]], align [[#SBYTES]]
  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+2]] = insertvalue [4 x i[[#SBITS]]] undef, i[[#SBITS]] %[[#R+1]], 0
  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+3]] = insertvalue [4 x i[[#SBITS]]] %[[#R+2]], i[[#SBITS]] %[[#R+1]], 1
  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+4]] = insertvalue [4 x i[[#SBITS]]] %[[#R+3]], i[[#SBITS]] %[[#R+1]], 2
  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+5]] = insertvalue [4 x i[[#SBITS]]] %[[#R+4]], i[[#SBITS]] %[[#R+1]], 3
  ; NO_COMBINE_LOAD_PTR-NEXT: %a = load [4 x i1], [4 x i1]* %p
  ; NO_COMBINE_LOAD_PTR-NEXT: store [4 x i[[#SBITS]]] %[[#R+5]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align 2
  ; NO_COMBINE_LOAD_PTR-NEXT: ret [4 x i1] %a

  %p = alloca [4 x i1]
  %a = load [4 x i1], [4 x i1]* %p
  ret [4 x i1] %a
}

define [0 x i1] @load_array0([0 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array0"
  ; NO_COMBINE_LOAD_PTR: store [0 x i[[#SBITS]]] zeroinitializer, [0 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [0 x i[[#SBITS]]]*), align 2
  %a = load [0 x i1], [0 x i1]* %p
  ret [0 x i1] %a
}

define [1 x i1] @load_array1([1 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array1"
  ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load i[[#SBITS]],
  ; NO_COMBINE_LOAD_PTR: [[S:%.*]] = insertvalue [1 x i[[#SBITS]]] undef, i[[#SBITS]] [[L]], 0
  ; NO_COMBINE_LOAD_PTR: store [1 x i[[#SBITS]]] [[S]], [1 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [1 x i[[#SBITS]]]*), align 2

  ; EVENT_CALLBACKS: @"dfs$load_array1"
  ; EVENT_CALLBACKS: [[L:%.*]] = or i[[#SBITS]]
  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] [[L]], i8* {{.*}})

  ; FAST16: @"dfs$load_array1"
  ; FAST16: [[P:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
  ; FAST16: [[L:%.*]] = load i[[#SBITS]], i[[#SBITS]]* {{.*}}, align [[#SBYTES]]
  ; FAST16: [[U:%.*]] = or i[[#SBITS]] [[L]], [[P]]
  ; FAST16: [[S1:%.*]] = insertvalue [1 x i[[#SBITS]]] undef, i[[#SBITS]] [[U]], 0
  ; FAST16: store [1 x i[[#SBITS]]] [[S1]], [1 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [1 x i[[#SBITS]]]*), align [[ALIGN]]

  ; LEGACY: @"dfs$load_array1"
  ; LEGACY: [[P:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
  ; LEGACY: [[L:%.*]] = load i[[#SBITS]], i[[#SBITS]]* {{.*}}, align [[#SBYTES]]
  ; LEGACY: [[U:%.*]] = call zeroext i[[#SBITS]] @__dfsan_union(i[[#SBITS]] zeroext [[L]], i[[#SBITS]] zeroext [[P]])
  ; LEGACY: [[PH:%.*]] = phi i[[#SBITS]] [ [[U]], {{.*}} ], [ [[L]], {{.*}} ]
  ; LEGACY: store i[[#SBITS]] [[PH]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]

  %a = load [1 x i1], [1 x i1]* %p
  ret [1 x i1] %a
}

define [2 x i1] @load_array2([2 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array2"
  ; NO_COMBINE_LOAD_PTR: [[P1:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[P0:%.*]], i64 1
  ; NO_COMBINE_LOAD_PTR-DAG: [[E1:%.*]] = load i[[#SBITS]], i[[#SBITS]]* [[P1]], align [[#SBYTES]]
  ; NO_COMBINE_LOAD_PTR-DAG: [[E0:%.*]] = load i[[#SBITS]], i[[#SBITS]]* [[P0]], align [[#SBYTES]]
  ; NO_COMBINE_LOAD_PTR: [[U:%.*]] = or i[[#SBITS]] [[E0]], [[E1]]
  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [2 x i[[#SBITS]]] undef, i[[#SBITS]] [[U]], 0
  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [2 x i[[#SBITS]]] [[S1]], i[[#SBITS]] [[U]], 1
  ; NO_COMBINE_LOAD_PTR: store [2 x i[[#SBITS]]] [[S2]], [2 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]

  ; EVENT_CALLBACKS: @"dfs$load_array2"
  ; EVENT_CALLBACKS: [[O1:%.*]] = or i[[#SBITS]]
  ; EVENT_CALLBACKS: [[O2:%.*]] = or i[[#SBITS]] [[O1]]
  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] [[O2]], i8* {{.*}})

  ; FAST16: @"dfs$load_array2"
  ; FAST16: [[P:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
  ; FAST16: [[O:%.*]] = or i[[#SBITS]]
  ; FAST16: [[U:%.*]] = or i[[#SBITS]] [[O]], [[P]]
  ; FAST16: [[S:%.*]] = insertvalue [2 x i[[#SBITS]]] undef, i[[#SBITS]] [[U]], 0
  ; FAST16: [[S1:%.*]] = insertvalue [2 x i[[#SBITS]]] [[S]], i[[#SBITS]] [[U]], 1
  ; FAST16: store [2 x i[[#SBITS]]] [[S1]], [2 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [2 x i[[#SBITS]]]*), align [[ALIGN]]
  %a = load [2 x i1], [2 x i1]* %p
  ret [2 x i1] %a
}

define [4 x i1] @load_array4([4 x i1]* %p) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$load_array4"
  ; NO_COMBINE_LOAD_PTR: [[T:%.*]] = trunc i[[#mul(4, SBITS)]] {{.*}} to i[[#SBITS]]
  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [4 x i[[#SBITS]]] undef, i[[#SBITS]] [[T]], 0
  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S1]], i[[#SBITS]] [[T]], 1
  ; NO_COMBINE_LOAD_PTR: [[S3:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S2]], i[[#SBITS]] [[T]], 2
  ; NO_COMBINE_LOAD_PTR: [[S4:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S3]], i[[#SBITS]] [[T]], 3
  ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] [[S4]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align 2

  ; EVENT_CALLBACKS: @"dfs$load_array4"
  ; EVENT_CALLBACKS: [[O0:%.*]] = or i[[#mul(4, SBITS)]]
  ; EVENT_CALLBACKS: [[O1:%.*]] = or i[[#mul(4, SBITS)]] [[O0]]
  ; EVENT_CALLBACKS: [[O2:%.*]] = trunc i[[#mul(4, SBITS)]] [[O1]] to i[[#SBITS]]
  ; EVENT_CALLBACKS: [[O3:%.*]] = or i[[#SBITS]] [[O2]]
  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] [[O3]], i8* {{.*}})

  ; FAST16: @"dfs$load_array4"
  ; FAST16: [[T:%.*]] = trunc i[[#mul(4, SBITS)]] {{.*}} to i[[#SBITS]]
  ; FAST16: [[O:%.*]] = or i[[#SBITS]] [[T]]
  ; FAST16: [[S1:%.*]] = insertvalue [4 x i[[#SBITS]]] undef, i[[#SBITS]] [[O]], 0
  ; FAST16: [[S2:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S1]], i[[#SBITS]] [[O]], 1
  ; FAST16: [[S3:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S2]], i[[#SBITS]] [[O]], 2
  ; FAST16: [[S4:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S3]], i[[#SBITS]] [[O]], 3
  ; FAST16: store [4 x i[[#SBITS]]] [[S4]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align 2

  ; LEGACY: @"dfs$load_array4"
  ; LEGACY: [[P:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
  ; LEGACY: [[PH1:%.*]] = phi i[[#SBITS]]
  ; LEGACY: [[U:%.*]] = call zeroext i[[#SBITS]] @__dfsan_union(i[[#SBITS]] zeroext [[PH1]], i[[#SBITS]] zeroext [[P]])
  ; LEGACY: [[PH:%.*]] = phi i[[#SBITS]] [ [[U]], {{.*}} ], [ [[PH1]], {{.*}} ]
  ; LEGACY: store i[[#SBITS]] [[PH]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align [[ALIGN]]

  %a = load [4 x i1], [4 x i1]* %p
  ret [4 x i1] %a
}

define i1 @extract_array([4 x i1] %a) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$extract_array"
  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = extractvalue [4 x i[[#SBITS]]] [[AM]], 2
  ; NO_COMBINE_LOAD_PTR: store i[[#SBITS]] [[EM]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
  %e2 = extractvalue [4 x i1] %a, 2
  ret i1 %e2
}

define [4 x i1] @insert_array([4 x i1] %a, i1 %e2) {
  ; NO_COMBINE_LOAD_PTR: @"dfs$insert_array"
  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = load i[[#SBITS]], i[[#SBITS]]*
  ; NO_COMBINE_LOAD_PTR-SAME: inttoptr (i64 add (i64 ptrtoint ([[TLS_ARR]]* @__dfsan_arg_tls to i64), i64 [[#mul(4, SBYTES)]]) to i[[#SBITS]]*), align [[ALIGN:2]]
  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]
  ; NO_COMBINE_LOAD_PTR: [[AM1:%.*]] = insertvalue [4 x i[[#SBITS]]] [[AM]], i[[#SBITS]] [[EM]], 0
  ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] [[AM1]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]
  %a1 = insertvalue [4 x i1] %a, i1 %e2, 0
  ret [4 x i1] %a1
}

define void @store_alloca_array([4 x i1] %a) {
  ; FAST16: @"dfs$store_alloca_array"
  ; FAST16: [[S:%.*]] = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
  ; FAST16: [[SP:%.*]] = alloca i[[#SBITS]], align [[#SBYTES]]
  ; FAST16: [[E0:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 0
  ; FAST16: [[E1:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 1
  ; FAST16: [[E01:%.*]] = or i[[#SBITS]] [[E0]], [[E1]]
  ; FAST16: [[E2:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 2
  ; FAST16: [[E012:%.*]] = or i[[#SBITS]] [[E01]], [[E2]]
  ; FAST16: [[E3:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 3
  ; FAST16: [[E0123:%.*]] = or i[[#SBITS]] [[E012]], [[E3]]
  ; FAST16: store i[[#SBITS]] [[E0123]], i[[#SBITS]]* [[SP]], align [[#SBYTES]]
  %p = alloca [4 x i1]
  store [4 x i1] %a, [4 x i1]* %p
  ret void
}

define void @store_zero_array([4 x i1]* %p) {
  ; FAST16: @"dfs$store_zero_array"
  ; FAST16: store i[[#mul(4, SBITS)]] 0, i[[#mul(4, SBITS)]]* {{.*}}
  store [4 x i1] zeroinitializer, [4 x i1]* %p
  ret void
}

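; For stores of whole arrays, the element shadows are combined with 'or' and the
; combined shadow is written to every element's shadow slot; the
; -dfsan-combine-pointer-labels-on-store mode also folds in the pointer
; operand's shadow, and the event-callback mode reports the store via
; __dfsan_store_callback.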
define void @store_array2([2 x i1] %a, [2 x i1]* %p) {
  ; LEGACY: @"dfs$store_array2"
  ; LEGACY: [[S:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
  ; LEGACY: [[SP0:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[SP:%.*]], i32 0
  ; LEGACY: store i[[#SBITS]] [[S]], i[[#SBITS]]* [[SP0]], align [[#SBYTES]]
  ; LEGACY: [[SP1:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[SP]], i32 1
  ; LEGACY: store i[[#SBITS]] [[S]], i[[#SBITS]]* [[SP1]], align [[#SBYTES]]

  ; EVENT_CALLBACKS: @"dfs$store_array2"
  ; EVENT_CALLBACKS: [[E12:%.*]] = or i[[#SBITS]]
  ; EVENT_CALLBACKS: [[P:%.*]] = bitcast [2 x i1]* %p to i8*
  ; EVENT_CALLBACKS: call void @__dfsan_store_callback(i[[#SBITS]] [[E12]], i8* [[P]])

  ; FAST16: @"dfs$store_array2"
  ; FAST16: [[S:%.*]] = load [2 x i[[#SBITS]]], [2 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [2 x i[[#SBITS]]]*), align [[ALIGN:2]]
  ; FAST16: [[E1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[S]], 0
  ; FAST16: [[E2:%.*]] = extractvalue [2 x i[[#SBITS]]] [[S]], 1
  ; FAST16: [[E12:%.*]] = or i[[#SBITS]] [[E1]], [[E2]]
  ; FAST16: [[SP0:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[SP:%.*]], i32 0
  ; FAST16: store i[[#SBITS]] [[E12]], i[[#SBITS]]* [[SP0]], align [[#SBYTES]]
  ; FAST16: [[SP1:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[SP]], i32 1
  ; FAST16: store i[[#SBITS]] [[E12]], i[[#SBITS]]* [[SP1]], align [[#SBYTES]]

  ; COMBINE_STORE_PTR: @"dfs$store_array2"
  ; COMBINE_STORE_PTR: [[O:%.*]] = or i[[#SBITS]]
  ; COMBINE_STORE_PTR: [[U:%.*]] = or i[[#SBITS]] [[O]]
  ; COMBINE_STORE_PTR: [[P1:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[P:%.*]], i32 0
  ; COMBINE_STORE_PTR: store i[[#SBITS]] [[U]], i[[#SBITS]]* [[P1]], align [[#SBYTES]]
  ; COMBINE_STORE_PTR: [[P2:%.*]] = getelementptr i[[#SBITS]], i[[#SBITS]]* [[P]], i32 1
  ; COMBINE_STORE_PTR: store i[[#SBITS]] [[U]], i[[#SBITS]]* [[P2]], align [[#SBYTES]]

  store [2 x i1] %a, [2 x i1]* %p
  ret void
}

define void @store_array17([17 x i1] %a, [17 x i1]* %p) {
  ; FAST16: @"dfs$store_array17"
  ; FAST16: %[[#R:]] = load [17 x i[[#SBITS]]], [17 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [17 x i[[#SBITS]]]*), align 2
  ; FAST16: %[[#R+1]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 0
  ; FAST16: %[[#R+2]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 1
  ; FAST16: %[[#R+3]] = or i[[#SBITS]] %[[#R+1]], %[[#R+2]]
  ; FAST16: %[[#R+4]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 2
  ; FAST16: %[[#R+5]] = or i[[#SBITS]] %[[#R+3]], %[[#R+4]]
  ; FAST16: %[[#R+6]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 3
  ; FAST16: %[[#R+7]] = or i[[#SBITS]] %[[#R+5]], %[[#R+6]]
  ; FAST16: %[[#R+8]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 4
  ; FAST16: %[[#R+9]] = or i[[#SBITS]] %[[#R+7]], %[[#R+8]]
  ; FAST16: %[[#R+10]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 5
  ; FAST16: %[[#R+11]] = or i[[#SBITS]] %[[#R+9]], %[[#R+10]]
  ; FAST16: %[[#R+12]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 6
  ; FAST16: %[[#R+13]] = or i[[#SBITS]] %[[#R+11]], %[[#R+12]]
  ; FAST16: %[[#R+14]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 7
  ; FAST16: %[[#R+15]] = or i[[#SBITS]] %[[#R+13]], %[[#R+14]]
  ; FAST16: %[[#R+16]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 8
  ; FAST16: %[[#R+17]] = or i[[#SBITS]] %[[#R+15]], %[[#R+16]]
  ; FAST16: %[[#R+18]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 9
  ; FAST16: %[[#R+19]] = or i[[#SBITS]] %[[#R+17]], %[[#R+18]]
  ; FAST16: %[[#R+20]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 10
  ; FAST16: %[[#R+21]] = or i[[#SBITS]] %[[#R+19]], %[[#R+20]]
  ; FAST16: %[[#R+22]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 11
  ; FAST16: %[[#R+23]] = or i[[#SBITS]] %[[#R+21]], %[[#R+22]]
  ; FAST16: %[[#R+24]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 12
  ; FAST16: %[[#R+25]] = or i[[#SBITS]] %[[#R+23]], %[[#R+24]]
  ; FAST16: %[[#R+26]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 13
  ; FAST16: %[[#R+27]] = or i[[#SBITS]] %[[#R+25]], %[[#R+26]]
  ; FAST16: %[[#R+28]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 14
  ; FAST16: %[[#R+29]] = or i[[#SBITS]] %[[#R+27]], %[[#R+28]]
  ; FAST16: %[[#R+30]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 15
  ; FAST16: %[[#R+31]] = or i[[#SBITS]] %[[#R+29]], %[[#R+30]]
  ; FAST16: %[[#R+32]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 16
  ; FAST16: %[[#R+33]] = or i[[#SBITS]] %[[#R+31]], %[[#R+32]]
  ; FAST16: %[[#VREG:]] = insertelement <8 x i[[#SBITS]]> undef, i[[#SBITS]] %[[#R+33]], i32 0
  ; FAST16: %[[#VREG+1]] = insertelement <8 x i[[#SBITS]]> %[[#VREG]], i[[#SBITS]] %[[#R+33]], i32 1
  ; FAST16: %[[#VREG+2]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+1]], i[[#SBITS]] %[[#R+33]], i32 2
  ; FAST16: %[[#VREG+3]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+2]], i[[#SBITS]] %[[#R+33]], i32 3
  ; FAST16: %[[#VREG+4]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+3]], i[[#SBITS]] %[[#R+33]], i32 4
  ; FAST16: %[[#VREG+5]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+4]], i[[#SBITS]] %[[#R+33]], i32 5
  ; FAST16: %[[#VREG+6]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+5]], i[[#SBITS]] %[[#R+33]], i32 6
  ; FAST16: %[[#VREG+7]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+6]], i[[#SBITS]] %[[#R+33]], i32 7
  ; FAST16: %[[#VREG+8]] = bitcast i[[#SBITS]]* %[[P:.*]] to <8 x i[[#SBITS]]>*
  ; FAST16: %[[#VREG+9]] = getelementptr <8 x i[[#SBITS]]>, <8 x i[[#SBITS]]>* %[[#VREG+8]], i32 0
  ; FAST16: store <8 x i[[#SBITS]]> %[[#VREG+7]], <8 x i[[#SBITS]]>* %[[#VREG+9]], align [[#SBYTES]]
  ; FAST16: %[[#VREG+10]] = getelementptr <8 x i[[#SBITS]]>, <8 x i[[#SBITS]]>* %[[#VREG+8]], i32 1
  ; FAST16: store <8 x i[[#SBITS]]> %[[#VREG+7]], <8 x i[[#SBITS]]>* %[[#VREG+10]], align [[#SBYTES]]
  ; FAST16: %[[#VREG+11]] = getelementptr i[[#SBITS]], i[[#SBITS]]* %[[P]], i32 16
  ; FAST16: store i[[#SBITS]] %[[#R+33]], i[[#SBITS]]* %[[#VREG+11]], align [[#SBYTES]]
  store [17 x i1] %a, [17 x i1]* %p
  ret void
}

define [2 x i32] @const_array() {
  ; FAST16: @"dfs$const_array"
  ; FAST16: store [2 x i[[#SBITS]]] zeroinitializer, [2 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [2 x i[[#SBITS]]]*), align 2
  ret [2 x i32] [ i32 42, i32 11 ]
}

define [4 x i8] @call_array([4 x i8] %a) {
  ; FAST16-LABEL: @"dfs$call_array"
  ; FAST16: %[[#R:]] = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN:2]]
  ; FAST16: store [4 x i[[#SBITS]]] %[[#R]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]
  ; FAST16: %_dfsret = load [4 x i[[#SBITS]]], [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]
  ; FAST16: store [4 x i[[#SBITS]]] %_dfsret, [4 x i[[#SBITS]]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to [4 x i[[#SBITS]]]*), align [[ALIGN]]

  %r = call [4 x i8] @pass_array([4 x i8] %a)
  ret [4 x i8] %r
}

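; The remaining tests use a very large aggregate ([1000 x i8]), whose shadow
; would not fit in the [100 x i64] argument/return TLS buffers; the checks
; therefore expect a zero shadow rather than per-element propagation.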
%LargeArr = type [1000 x i8]

define i8 @fun_with_large_args(i1 %i, %LargeArr %a) {
  ; FAST16: @"dfs$fun_with_large_args"
  ; FAST16: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
  %r = extractvalue %LargeArr %a, 0
  ret i8 %r
}

define %LargeArr @fun_with_large_ret() {
  ; FAST16: @"dfs$fun_with_large_ret"
  ; FAST16-NEXT: ret [1000 x i8] zeroinitializer
  ret %LargeArr zeroinitializer
}

define i8 @call_fun_with_large_ret() {
  ; FAST16: @"dfs$call_fun_with_large_ret"
  ; FAST16: store i[[#SBITS]] 0, i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_retval_tls to i[[#SBITS]]*), align 2
  %r = call %LargeArr @fun_with_large_ret()
  %e = extractvalue %LargeArr %r, 0
  ret i8 %e
}

define i8 @call_fun_with_large_args(i1 %i, %LargeArr %a) {
  ; FAST16: @"dfs$call_fun_with_large_args"
  ; FAST16: [[I:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
  ; FAST16: store i[[#SBITS]] [[I]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN]]
  ; FAST16: %r = call i8 @"dfs$fun_with_large_args"(i1 %i, [1000 x i8] %a)

  %r = call i8 @fun_with_large_args(i1 %i, %LargeArr %a)
  ret i8 %r
}