Lines Matching +full:non +full:- +full:volatile

14  *      - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
41 are looking for barriers to use with cache-coherent multi-threaded
47 - CPU attached address space (the CPU memory could be a range of things:
48 cached/uncached/non-temporal CPU DRAM, uncached MMIO space in another
53 - A DMA initiator on a bus. For instance a PCI-E device issuing
57 happens if a MemRd TLP is sent in via PCI-E relative to a CPU WRITE to the
80 memory types or non-temporal stores are required to use SFENCE in their own
84 #define udma_to_device_barrier() asm volatile("" ::: "memory")
86 #define udma_to_device_barrier() asm volatile("" ::: "memory")
88 #define udma_to_device_barrier() asm volatile("sync" ::: "memory")
90 #define udma_to_device_barrier() asm volatile("sync" ::: "memory")
92 #define udma_to_device_barrier() asm volatile("mf" ::: "memory")
94 #define udma_to_device_barrier() asm volatile("membar #StoreStore" ::: "memory")
96 #define udma_to_device_barrier() asm volatile("dsb st" ::: "memory")
98 #define udma_to_device_barrier() asm volatile("" ::: "memory")
117 from the device - eg by reading a MMIO register or seeing that CPU memory is
128 #define udma_from_device_barrier() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
130 #define udma_from_device_barrier() asm volatile("lfence" ::: "memory")
132 #define udma_from_device_barrier() asm volatile("lwsync" ::: "memory")
134 #define udma_from_device_barrier() asm volatile("sync" ::: "memory")
136 #define udma_from_device_barrier() asm volatile("mf" ::: "memory")
138 #define udma_from_device_barrier() asm volatile("membar #LoadLoad" ::: "memory")
140 #define udma_from_device_barrier() asm volatile("dsb ld" ::: "memory")
142 #define udma_from_device_barrier() asm volatile("" ::: "memory")
167 wqe->addr = ...;
168 wqe->flags = ...;
170 wqe->valid = 1;
195 PCI-E MemWr TLPs from the CPU.
198 #define mmio_flush_writes() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
200 #define mmio_flush_writes() asm volatile("sfence" ::: "memory")
202 #define mmio_flush_writes() asm volatile("sync" ::: "memory")
204 #define mmio_flush_writes() asm volatile("sync" ::: "memory")
206 #define mmio_flush_writes() asm volatile("fwb" ::: "memory")
208 #define mmio_flush_writes() asm volatile("membar #StoreStore" ::: "memory")
210 #define mmio_flush_writes() asm volatile("dsb st" ::: "memory")
212 #define mmio_flush_writes() asm volatile("" ::: "memory")
223 /* Prevent WC writes from being re-ordered relative to other MMIO
226 This must act as a barrier to prevent write re-ordering from different
236 PCI-E MemWr TLPs from the CPU.
244 volatile.
254 Any access to a multi-value WC region must ensure that multiple cpus do not
279 * to force-flush the WC buffers quickly, and this SFENCE can be in mmio_wc_spinunlock()