Lines Matching refs:unordered
9 %r = load atomic i8, ptr %p1 unordered, align 1
14 %r = load atomic i16, ptr %p1 unordered, align 2
19 %r = load atomic i32, ptr %p1 unordered, align 4
24 %r = load atomic i64, ptr %p1 unordered, align 8
29 %r = load atomic float, ptr %p1 unordered, align 4
34 %r = load atomic float, ptr %p1 unordered, align 8
39 %r = load atomic double, ptr %p1 unordered, align 8
44 %r = load atomic double, ptr %p1 unordered, align 8
49 store atomic i32 %val, ptr %p1 unordered, align 4
54 store atomic i64 %val, ptr %p1 unordered, align 8
59 store atomic float %val, ptr %p1 unordered, align 4
64 store atomic float %val, ptr %p1 unordered, align 4
69 store atomic double %val, ptr %p1 unordered, align 8
74 store atomic double %val, ptr %p1 unordered, align 8
79 %p = load atomic ptr, ptr %ptr1 unordered, align 8
84 store atomic ptr %a, ptr %ptr1 unordered, align 8
90 store atomic i32 %val, ptr %arrayidx unordered, align 8
91 %r = load atomic i32, ptr %arrayidx unordered, align 8
97 store atomic i32 %val, ptr %arrayidx unordered, align 8
98 %r = load atomic i32, ptr %arrayidx unordered, align 8
116 ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
121 ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
126 ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
131 ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
135 %1(s8) = G_LOAD %0(p0) :: (load unordered (s8) from %ir.p1)
154 ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
159 ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
164 ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
169 ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
173 %1(s16) = G_LOAD %0(p0) :: (load unordered (s16) from %ir.p1)
192 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
197 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
202 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
207 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
211 %1(s32) = G_LOAD %0(p0) :: (load unordered (s32) from %ir.p1)
230 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
235 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
240 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
245 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
249 %1(s64) = G_LOAD %0(p0) :: (load unordered (s64) from %ir.p1)
270 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
277 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
284 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
291 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
297 %1:gpr(s32) = G_LOAD %0(p0) :: (load unordered (s32) from %ir.p1)
320 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
327 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
334 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
341 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
347 %1:gpr(s32) = G_LOAD %0(p0) :: (load unordered (s32) from %ir.p1)
370 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
377 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
384 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
391 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
397 %1:gpr(s64) = G_LOAD %0(p0) :: (load unordered (s64) from %ir.p1)
420 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
427 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
434 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
441 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
447 %1:gpr(s64) = G_LOAD %0(p0) :: (load unordered (s64) from %ir.p1)
469 ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
475 ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
481 ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
487 ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
492 G_STORE %0(s32), %1(p0) :: (store unordered (s32) into %ir.p1)
512 ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
518 ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
524 ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
530 ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
535 G_STORE %0(s64), %1(p0) :: (store unordered (s64) into %ir.p1)
559 ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
567 ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
575 ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
583 ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
590 G_STORE %3(s32), %1(p0) :: (store unordered (s32) into %ir.p1)
614 ; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
622 ; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
630 ; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
638 ; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
645 G_STORE %3(s32), %1(p0) :: (store unordered (s32) into %ir.p1)
670 ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
678 ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
686 ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
694 ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
701 G_STORE %3(s64), %1(p0) :: (store unordered (s64) into %ir.p1)
725 ; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
733 ; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
741 ; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
749 ; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
756 G_STORE %3(s64), %1(p0) :: (store unordered (s64) into %ir.p1)
776 ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
781 ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
786 ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
791 ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
795 %1(p0) = G_LOAD %0(p0) :: (load unordered (p0) from %ir.ptr1)
816 ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
821 ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
826 ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
831 ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
835 G_STORE %1(p0), %0(p0) :: (store unordered (p0) into %ir.ptr1)
857 ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
858 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
864 ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
865 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
871 ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
872 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
878 ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
879 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
886 G_STORE %1(s32), %3(p0) :: (store unordered (s32) into %ir.arrayidx)
887 %4(s32) = G_LOAD %3(p0) :: (load unordered (s32) from %ir.arrayidx)
912 ; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
913 ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
921 ; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
922 ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
930 ; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
931 ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
939 ; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
940 ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
947 G_STORE %1(s32), %3(p0) :: (store unordered (s32) into %ir.arrayidx)
948 %4(s32) = G_LOAD %3(p0) :: (load unordered (s32) from %ir.arrayidx)