; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv7 %s -o - | FileCheck %s --check-prefix=ARM
; RUN: llc -mtriple=armv7eb %s -o - | FileCheck %s --check-prefix=ARMEB
; RUN: llc -mtriple=armv6m %s -o - | FileCheck %s --check-prefix=THUMB1
; RUN: llc -mtriple=thumbv8m.main %s -o - | FileCheck %s --check-prefix=THUMB2

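; These tests cover folding an 'and' with a low-byte (255) or low-halfword
; (65535) mask into the loads that feed it, so the masked value comes from a
; narrow ldrb/ldrh rather than a wider load plus an explicit mask instruction.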
define arm_aapcscc zeroext i1 @cmp_xor8_short_short(i16* nocapture readonly %a,
                                                    i16* nocapture readonly %b) {
; ARM-LABEL: cmp_xor8_short_short:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    eor r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_xor8_short_short:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    ldrb r1, [r1, #1]
; ARMEB-NEXT:    eor r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_xor8_short_short:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    eors r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_xor8_short_short:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    eors r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %a, align 2
  %1 = load i16, i16* %b, align 2
  %xor2 = xor i16 %1, %0
  %2 = and i16 %xor2, 255
  %cmp = icmp eq i16 %2, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_xor8_short_int(i16* nocapture readonly %a,
                                                  i32* nocapture readonly %b) {
; ARM-LABEL: cmp_xor8_short_int:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    eor r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_xor8_short_int:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    ldrb r1, [r1, #3]
; ARMEB-NEXT:    eor r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_xor8_short_int:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    eors r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_xor8_short_int:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    eors r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %a, align 2
  %conv = zext i16 %0 to i32
  %1 = load i32, i32* %b, align 4
  %xor = xor i32 %1, %conv
  %and = and i32 %xor, 255
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_xor8_int_int(i32* nocapture readonly %a,
                                                i32* nocapture readonly %b) {
; ARM-LABEL: cmp_xor8_int_int:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    eor r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_xor8_int_int:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #3]
; ARMEB-NEXT:    ldrb r1, [r1, #3]
; ARMEB-NEXT:    eor r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_xor8_int_int:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    eors r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_xor8_int_int:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    eors r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %xor = xor i32 %1, %0
  %and = and i32 %xor, 255
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_xor16(i32* nocapture readonly %a,
                                         i32* nocapture readonly %b) {
; ARM-LABEL: cmp_xor16:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    ldrh r1, [r1]
; ARM-NEXT:    eor r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_xor16:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    ldrh r1, [r1, #2]
; ARMEB-NEXT:    eor r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_xor16:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrh r0, [r0]
; THUMB1-NEXT:    ldrh r1, [r1]
; THUMB1-NEXT:    eors r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_xor16:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    ldrh r1, [r1]
; THUMB2-NEXT:    eors r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %xor = xor i32 %1, %0
  %and = and i32 %xor, 65535
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_or8_short_short(i16* nocapture readonly %a,
                                                   i16* nocapture readonly %b) {
; ARM-LABEL: cmp_or8_short_short:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    orr r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_or8_short_short:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    ldrb r1, [r1, #1]
; ARMEB-NEXT:    orr r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_or8_short_short:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    orrs r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_or8_short_short:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %a, align 2
  %1 = load i16, i16* %b, align 2
  %or2 = or i16 %1, %0
  %2 = and i16 %or2, 255
  %cmp = icmp eq i16 %2, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_or8_short_int(i16* nocapture readonly %a,
                                                 i32* nocapture readonly %b) {
; ARM-LABEL: cmp_or8_short_int:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    orr r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_or8_short_int:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    ldrb r1, [r1, #3]
; ARMEB-NEXT:    orr r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_or8_short_int:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    orrs r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_or8_short_int:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %a, align 2
  %conv = zext i16 %0 to i32
  %1 = load i32, i32* %b, align 4
  %or = or i32 %1, %conv
  %and = and i32 %or, 255
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_or8_int_int(i32* nocapture readonly %a,
                                               i32* nocapture readonly %b) {
; ARM-LABEL: cmp_or8_int_int:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    orr r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_or8_int_int:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #3]
; ARMEB-NEXT:    ldrb r1, [r1, #3]
; ARMEB-NEXT:    orr r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_or8_int_int:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    orrs r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_or8_int_int:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %or = or i32 %1, %0
  %and = and i32 %or, 255
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_or16(i32* nocapture readonly %a,
                                        i32* nocapture readonly %b) {
; ARM-LABEL: cmp_or16:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    ldrh r1, [r1]
; ARM-NEXT:    orr r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_or16:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    ldrh r1, [r1, #2]
; ARMEB-NEXT:    orr r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_or16:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrh r0, [r0]
; THUMB1-NEXT:    ldrh r1, [r1]
; THUMB1-NEXT:    orrs r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_or16:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    ldrh r1, [r1]
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %or = or i32 %1, %0
  %and = and i32 %or, 65535
  %cmp = icmp eq i32 %and, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_and8_short_short(i16* nocapture readonly %a,
                                                    i16* nocapture readonly %b) {
; ARM-LABEL: cmp_and8_short_short:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    and r0, r0, r1
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_and8_short_short:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r1, [r1, #1]
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    and r0, r0, r1
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_and8_short_short:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    ldrb r2, [r0]
; THUMB1-NEXT:    ands r2, r1
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r2
; THUMB1-NEXT:    adcs r0, r2
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_and8_short_short:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ands r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %a, align 2
  %1 = load i16, i16* %b, align 2
  %and3 = and i16 %0, 255
  %2 = and i16 %and3, %1
  %cmp = icmp eq i16 %2, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_and8_short_int(i16* nocapture readonly %a,
                                                  i32* nocapture readonly %b) {
; ARM-LABEL: cmp_and8_short_int:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    and r0, r1, r0
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_and8_short_int:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    ldrb r1, [r1, #3]
; ARMEB-NEXT:    and r0, r1, r0
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_and8_short_int:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    ands r1, r0
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_and8_short_int:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    ands r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %a, align 2
  %1 = load i32, i32* %b, align 4
  %2 = and i16 %0, 255
  %and = zext i16 %2 to i32
  %and1 = and i32 %1, %and
  %cmp = icmp eq i32 %and1, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_and8_int_int(i32* nocapture readonly %a,
                                                i32* nocapture readonly %b) {
; ARM-LABEL: cmp_and8_int_int:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r1, [r1]
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    and r0, r0, r1
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_and8_int_int:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r1, [r1, #3]
; ARMEB-NEXT:    ldrb r0, [r0, #3]
; ARMEB-NEXT:    and r0, r0, r1
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_and8_int_int:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r1, [r1]
; THUMB1-NEXT:    ldrb r2, [r0]
; THUMB1-NEXT:    ands r2, r1
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r2
; THUMB1-NEXT:    adcs r0, r2
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_and8_int_int:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r1, [r1]
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ands r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %and = and i32 %0, 255
  %and1 = and i32 %and, %1
  %cmp = icmp eq i32 %and1, 0
  ret i1 %cmp
}

define arm_aapcscc zeroext i1 @cmp_and16(i32* nocapture readonly %a,
                                         i32* nocapture readonly %b) {
; ARM-LABEL: cmp_and16:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrh r1, [r1]
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    and r0, r0, r1
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: cmp_and16:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrh r1, [r1, #2]
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    and r0, r0, r1
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: cmp_and16:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrh r1, [r1]
; THUMB1-NEXT:    ldrh r2, [r0]
; THUMB1-NEXT:    ands r2, r1
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r2
; THUMB1-NEXT:    adcs r0, r2
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: cmp_and16:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrh r1, [r1]
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    ands r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %and = and i32 %0, 65535
  %and1 = and i32 %and, %1
  %cmp = icmp eq i32 %and1, 0
  ret i1 %cmp
}

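; Here the mask covers an 'or' of a loaded value and an add; the load is still
; narrowed to ldrh while the add result is masked explicitly with uxth.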
define arm_aapcscc i32 @add_and16(i32* nocapture readonly %a, i32 %y, i32 %z) {
; ARM-LABEL: add_and16:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    add r1, r1, r2
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    uxth r1, r1
; ARM-NEXT:    orr r0, r0, r1
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: add_and16:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    add r1, r1, r2
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    uxth r1, r1
; ARMEB-NEXT:    orr r0, r0, r1
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: add_and16:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    adds r1, r1, r2
; THUMB1-NEXT:    uxth r1, r1
; THUMB1-NEXT:    ldrh r0, [r0]
; THUMB1-NEXT:    orrs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: add_and16:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    add r1, r2
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    uxth r1, r1
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    bx lr
entry:
  %x = load i32, i32* %a, align 4
  %add = add i32 %y, %z
  %or = or i32 %x, %add
  %and = and i32 %or, 65535
  ret i32 %and
}

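; test1-test5 apply a 16-bit mask to expressions mixing loads, xor, mul and or;
; the checks show where the mask folds into narrowed ldrh loads or a uxth on
; one operand, and where a trailing uxth on the result remains.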
define arm_aapcscc i32 @test1(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-LABEL: test1:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    mul r2, r2, r3
; ARM-NEXT:    ldrh r1, [r1]
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    eor r0, r0, r1
; ARM-NEXT:    uxth r1, r2
; ARM-NEXT:    orr r0, r0, r1
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test1:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    mul r2, r2, r3
; ARMEB-NEXT:    ldrh r1, [r1, #2]
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    eor r0, r0, r1
; ARMEB-NEXT:    uxth r1, r2
; ARMEB-NEXT:    orr r0, r0, r1
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test1:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    push {r4, lr}
; THUMB1-NEXT:    ldrh r1, [r1]
; THUMB1-NEXT:    ldrh r4, [r0]
; THUMB1-NEXT:    eors r4, r1
; THUMB1-NEXT:    muls r2, r3, r2
; THUMB1-NEXT:    uxth r0, r2
; THUMB1-NEXT:    orrs r0, r4
; THUMB1-NEXT:    pop {r4, pc}
;
; THUMB2-LABEL: test1:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrh r1, [r1]
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    eors r0, r1
; THUMB2-NEXT:    mul r1, r2, r3
; THUMB2-NEXT:    uxth r1, r1
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %mul = mul i32 %x, %y
  %xor = xor i32 %0, %1
  %or = or i32 %xor, %mul
  %and = and i32 %or, 65535
  ret i32 %and
}

define arm_aapcscc i32 @test2(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-LABEL: test2:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldr r1, [r1]
; ARM-NEXT:    ldr r0, [r0]
; ARM-NEXT:    mul r1, r2, r1
; ARM-NEXT:    eor r0, r0, r3
; ARM-NEXT:    orr r0, r0, r1
; ARM-NEXT:    uxth r0, r0
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test2:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldr r1, [r1]
; ARMEB-NEXT:    ldr r0, [r0]
; ARMEB-NEXT:    mul r1, r2, r1
; ARMEB-NEXT:    eor r0, r0, r3
; ARMEB-NEXT:    orr r0, r0, r1
; ARMEB-NEXT:    uxth r0, r0
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test2:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldr r1, [r1]
; THUMB1-NEXT:    muls r1, r2, r1
; THUMB1-NEXT:    ldr r0, [r0]
; THUMB1-NEXT:    eors r0, r3
; THUMB1-NEXT:    orrs r0, r1
; THUMB1-NEXT:    uxth r0, r0
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test2:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldr r1, [r1]
; THUMB2-NEXT:    ldr r0, [r0]
; THUMB2-NEXT:    muls r1, r2, r1
; THUMB2-NEXT:    eors r0, r3
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    uxth r0, r0
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %mul = mul i32 %x, %1
  %xor = xor i32 %0, %y
  %or = or i32 %xor, %mul
  %and = and i32 %or, 65535
  ret i32 %and
}

define arm_aapcscc i32 @test3(i32* %a, i32* %b, i32 %x, i16* %y) {
; ARM-LABEL: test3:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldr r0, [r0]
; ARM-NEXT:    mul r1, r2, r0
; ARM-NEXT:    ldrh r2, [r3]
; ARM-NEXT:    eor r0, r0, r2
; ARM-NEXT:    orr r0, r0, r1
; ARM-NEXT:    uxth r0, r0
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test3:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldr r0, [r0]
; ARMEB-NEXT:    mul r1, r2, r0
; ARMEB-NEXT:    ldrh r2, [r3]
; ARMEB-NEXT:    eor r0, r0, r2
; ARMEB-NEXT:    orr r0, r0, r1
; ARMEB-NEXT:    uxth r0, r0
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test3:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldr r0, [r0]
; THUMB1-NEXT:    muls r2, r0, r2
; THUMB1-NEXT:    ldrh r1, [r3]
; THUMB1-NEXT:    eors r1, r0
; THUMB1-NEXT:    orrs r1, r2
; THUMB1-NEXT:    uxth r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test3:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldr r0, [r0]
; THUMB2-NEXT:    mul r1, r2, r0
; THUMB2-NEXT:    ldrh r2, [r3]
; THUMB2-NEXT:    eors r0, r2
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    uxth r0, r0
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i16, i16* %y, align 4
  %2 = zext i16 %1 to i32
  %mul = mul i32 %x, %0
  %xor = xor i32 %0, %2
  %or = or i32 %xor, %mul
  %and = and i32 %or, 65535
  ret i32 %and
}

define arm_aapcscc i32 @test4(i32* %a, i32* %b, i32 %x, i32 %y) {
; ARM-LABEL: test4:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    mul r2, r2, r3
; ARM-NEXT:    ldrh r1, [r1]
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    eor r0, r0, r1
; ARM-NEXT:    uxth r1, r2
; ARM-NEXT:    orr r0, r0, r1
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test4:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    mul r2, r2, r3
; ARMEB-NEXT:    ldrh r1, [r1, #2]
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    eor r0, r0, r1
; ARMEB-NEXT:    uxth r1, r2
; ARMEB-NEXT:    orr r0, r0, r1
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test4:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    push {r4, lr}
; THUMB1-NEXT:    ldrh r1, [r1]
; THUMB1-NEXT:    ldrh r4, [r0]
; THUMB1-NEXT:    eors r4, r1
; THUMB1-NEXT:    muls r2, r3, r2
; THUMB1-NEXT:    uxth r0, r2
; THUMB1-NEXT:    orrs r0, r4
; THUMB1-NEXT:    pop {r4, pc}
;
; THUMB2-LABEL: test4:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrh r1, [r1]
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    eors r0, r1
; THUMB2-NEXT:    mul r1, r2, r3
; THUMB2-NEXT:    uxth r1, r1
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %mul = mul i32 %x, %y
  %xor = xor i32 %0, %1
  %or = or i32 %xor, %mul
  %and = and i32 %or, 65535
  ret i32 %and
}

define arm_aapcscc i32 @test5(i32* %a, i32* %b, i32 %x, i16 zeroext %y) {
; ARM-LABEL: test5:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldr r1, [r1]
; ARM-NEXT:    ldrh r0, [r0]
; ARM-NEXT:    mul r1, r2, r1
; ARM-NEXT:    eor r0, r0, r3
; ARM-NEXT:    uxth r1, r1
; ARM-NEXT:    orr r0, r0, r1
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test5:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldr r1, [r1]
; ARMEB-NEXT:    ldrh r0, [r0, #2]
; ARMEB-NEXT:    mul r1, r2, r1
; ARMEB-NEXT:    eor r0, r0, r3
; ARMEB-NEXT:    uxth r1, r1
; ARMEB-NEXT:    orr r0, r0, r1
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test5:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    push {r4, lr}
; THUMB1-NEXT:    ldrh r4, [r0]
; THUMB1-NEXT:    eors r4, r3
; THUMB1-NEXT:    ldr r0, [r1]
; THUMB1-NEXT:    muls r0, r2, r0
; THUMB1-NEXT:    uxth r0, r0
; THUMB1-NEXT:    orrs r0, r4
; THUMB1-NEXT:    pop {r4, pc}
;
; THUMB2-LABEL: test5:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldr r1, [r1]
; THUMB2-NEXT:    ldrh r0, [r0]
; THUMB2-NEXT:    muls r1, r2, r1
; THUMB2-NEXT:    eors r0, r3
; THUMB2-NEXT:    uxth r1, r1
; THUMB2-NEXT:    orrs r0, r1
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %a, align 4
  %1 = load i32, i32* %b, align 4
  %mul = mul i32 %x, %1
  %ext = zext i16 %y to i32
  %xor = xor i32 %0, %ext
  %or = or i32 %xor, %mul
  %and = and i32 %or, 65535
  ret i32 %and
}

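; test6 and test7 compare an 'and' of a loaded value and a register argument
; against another argument; the load is emitted (or narrowed) as ldrb and the
; compared operand is zero-extended with uxtb.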
define arm_aapcscc i1 @test6(i8* %x, i8 %y, i8 %z) {
; ARM-LABEL: test6:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    and r0, r0, r1
; ARM-NEXT:    uxtb r1, r2
; ARM-NEXT:    sub r0, r0, r1
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test6:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0]
; ARMEB-NEXT:    and r0, r0, r1
; ARMEB-NEXT:    uxtb r1, r2
; ARMEB-NEXT:    sub r0, r0, r1
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test6:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ands r0, r1
; THUMB1-NEXT:    uxtb r1, r2
; THUMB1-NEXT:    subs r1, r0, r1
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test6:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ands r0, r1
; THUMB2-NEXT:    uxtb r1, r2
; THUMB2-NEXT:    subs r0, r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i8, i8* %x, align 4
  %1 = and i8 %0, %y
  %2 = icmp eq i8 %1, %z
  ret i1 %2
}

define arm_aapcscc i1 @test7(i16* %x, i16 %y, i8 %z) {
; ARM-LABEL: test7:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r0, [r0]
; ARM-NEXT:    and r0, r0, r1
; ARM-NEXT:    uxtb r1, r2
; ARM-NEXT:    sub r0, r0, r1
; ARM-NEXT:    clz r0, r0
; ARM-NEXT:    lsr r0, r0, #5
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test7:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r0, [r0, #1]
; ARMEB-NEXT:    and r0, r0, r1
; ARMEB-NEXT:    uxtb r1, r2
; ARMEB-NEXT:    sub r0, r0, r1
; ARMEB-NEXT:    clz r0, r0
; ARMEB-NEXT:    lsr r0, r0, #5
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test7:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r0, [r0]
; THUMB1-NEXT:    ands r0, r1
; THUMB1-NEXT:    uxtb r1, r2
; THUMB1-NEXT:    subs r1, r0, r1
; THUMB1-NEXT:    movs r0, #0
; THUMB1-NEXT:    subs r0, r0, r1
; THUMB1-NEXT:    adcs r0, r1
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test7:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r0, [r0]
; THUMB2-NEXT:    ands r0, r1
; THUMB2-NEXT:    uxtb r1, r2
; THUMB2-NEXT:    subs r0, r0, r1
; THUMB2-NEXT:    clz r0, r0
; THUMB2-NEXT:    lsrs r0, r0, #5
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i16, i16* %x, align 4
  %1 = and i16 %0, %y
  %2 = trunc i16 %1 to i8
  %3 = icmp eq i8 %2, %z
  ret i1 %3
}

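; test8-test10 combine a byte mask with an xor of 255 around a loaded word; the
; load is narrowed to ldrb and only an eor with #255 remains before the 32-bit
; store.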
define arm_aapcscc void @test8(i32* nocapture %p) {
; ARM-LABEL: test8:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r1, [r0]
; ARM-NEXT:    eor r1, r1, #255
; ARM-NEXT:    str r1, [r0]
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test8:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r1, [r0, #3]
; ARMEB-NEXT:    eor r1, r1, #255
; ARMEB-NEXT:    str r1, [r0]
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test8:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r1, [r0]
; THUMB1-NEXT:    movs r2, #255
; THUMB1-NEXT:    eors r2, r1
; THUMB1-NEXT:    str r2, [r0]
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test8:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r1, [r0]
; THUMB2-NEXT:    eor r1, r1, #255
; THUMB2-NEXT:    str r1, [r0]
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %p, align 4
  %neg = and i32 %0, 255
  %and = xor i32 %neg, 255
  store i32 %and, i32* %p, align 4
  ret void
}

define arm_aapcscc void @test9(i32* nocapture %p) {
; ARM-LABEL: test9:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r1, [r0]
; ARM-NEXT:    eor r1, r1, #255
; ARM-NEXT:    str r1, [r0]
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test9:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r1, [r0, #3]
; ARMEB-NEXT:    eor r1, r1, #255
; ARMEB-NEXT:    str r1, [r0]
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test9:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r1, [r0]
; THUMB1-NEXT:    movs r2, #255
; THUMB1-NEXT:    eors r2, r1
; THUMB1-NEXT:    str r2, [r0]
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test9:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r1, [r0]
; THUMB2-NEXT:    eor r1, r1, #255
; THUMB2-NEXT:    str r1, [r0]
; THUMB2-NEXT:    bx lr
entry:
  %0 = load i32, i32* %p, align 4
  %neg = xor i32 %0, -1
  %and = and i32 %neg, 255
  store i32 %and, i32* %p, align 4
  ret void
}

; ARM-LABEL: test10:
; ARM:       @ %bb.0: @ %entry
; ARM-NEXT:    ldrb r1, [r0]
; ARM-NEXT:    eor r1, r1, #255
; ARM-NEXT:    str r1, [r0]
; ARM-NEXT:    bx lr
;
; ARMEB-LABEL: test10:
; ARMEB:       @ %bb.0: @ %entry
; ARMEB-NEXT:    ldrb r1, [r0, #3]
; ARMEB-NEXT:    eor r1, r1, #255
; ARMEB-NEXT:    str r1, [r0]
; ARMEB-NEXT:    bx lr
;
; THUMB1-LABEL: test10:
; THUMB1:       @ %bb.0: @ %entry
; THUMB1-NEXT:    ldrb r1, [r0]
; THUMB1-NEXT:    movs r2, #255
; THUMB1-NEXT:    eors r2, r1
; THUMB1-NEXT:    str r2, [r0]
; THUMB1-NEXT:    bx lr
;
; THUMB2-LABEL: test10:
; THUMB2:       @ %bb.0: @ %entry
; THUMB2-NEXT:    ldrb r1, [r0]
; THUMB2-NEXT:    eor r1, r1, #255
; THUMB2-NEXT:    str r1, [r0]
; THUMB2-NEXT:    bx lr
define arm_aapcscc void @test10(i32* nocapture %p) {
entry:
  %0 = load i32, i32* %p, align 4
  %neg = and i32 %0, 255
  %and = xor i32 %neg, 255
  store i32 %and, i32* %p, align 4
  ret void
}
