/netbsd-src/sys/kern/
subr_thmap.c
  296  return (atomic_load_relaxed(&node->state) & NODE_LOCKED) != 0;  in node_locked_p()
  305  s = atomic_load_relaxed(&node->state);  in lock_node()
  321  uint32_t s = atomic_load_relaxed(&node->state) & ~NODE_LOCKED;  in unlock_node()
  452  ASSERT((atomic_load_relaxed(&node->state) & NODE_DELETED) == 0);  in node_insert()
  453  ASSERT(atomic_load_relaxed(&node->slots[slot]) == THMAP_NULL);  in node_insert()
  455  ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) < LEVEL_SIZE);  in node_insert()
  464  atomic_load_relaxed(&node->state) + 1);  in node_insert()
  471  ASSERT((atomic_load_relaxed(&node->state) & NODE_DELETED) == 0);  in node_remove()
  472  ASSERT(atomic_load_relaxed(&node->slots[slot]) != THMAP_NULL);  in node_remove()
  474  ASSERT(NODE_COUNT(atomic_load_relaxed(&node->state)) > 0);  in node_remove()
  [all ...]
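
The subr_thmap.c hits above show the lock-bit-in-a-state-word idiom: a relaxed load is enough to test or assert on the word, while acquire and release ordering is supplied only where the lock bit is actually taken or dropped. Below is a minimal sketch of that idiom in portable C11 atomics; the struct layout, helper names, and the assumption that only the lock holder writes the state word are illustrative, not thmap's actual code.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NODE_LOCKED (1u << 31)          /* hypothetical lock bit in the state word */

    struct node {
        _Atomic uint32_t state;             /* lock bit plus other packed fields */
    };

    /* A relaxed load is fine for a heuristic or assertion-style check. */
    static bool
    node_locked_p(struct node *n)
    {
        return (atomic_load_explicit(&n->state, memory_order_relaxed) & NODE_LOCKED) != 0;
    }

    static void
    lock_node(struct node *n)
    {
        uint32_t s;

        for (;;) {
            s = atomic_load_explicit(&n->state, memory_order_relaxed);
            if (s & NODE_LOCKED)
                continue;                   /* spin on cheap relaxed loads */
            /* Acquire ordering only on the CAS that actually takes the lock. */
            if (atomic_compare_exchange_weak_explicit(&n->state, &s,
                s | NODE_LOCKED, memory_order_acquire, memory_order_relaxed))
                return;
        }
    }

    static void
    unlock_node(struct node *n)
    {
        /* Assumes only the lock holder modifies the state word while it is held. */
        uint32_t s = atomic_load_explicit(&n->state, memory_order_relaxed) & ~NODE_LOCKED;

        atomic_store_explicit(&n->state, s, memory_order_release);
    }
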
kern_heartbeat.c
  232  return atomic_load_relaxed(&ci->ci_heartbeat_suspend);  in heartbeat_timecounter_suspended()
  408  unsigned count = atomic_load_relaxed(&curcpu()->ci_heartbeat_count);  in heartbeat_intr()
  483  if (atomic_load_relaxed(&panicstr) != NULL)
  530  if (atomic_load_relaxed(&ack))  in defibrillate()
  565  if (atomic_load_relaxed(&ci->ci_heartbeat_suspend))  in select_patient()
  643  period_ticks = atomic_load_relaxed(&heartbeat_max_period_ticks);  in heartbeat()
  644  period_secs = atomic_load_relaxed(&heartbeat_max_period_secs);  in heartbeat()
  663  cache = atomic_load_relaxed(&curcpu()->ci_heartbeat_uptime_cache);  in heartbeat()
  677  atomic_load_relaxed(&curcpu()->ci_heartbeat_uptime_stamp);  in heartbeat()
  734  d = uptime - atomic_load_relaxed(  [all ...]
subr_fault.c
   89  if (atomic_load_relaxed(&f->oneshot)) {  in fault_inject()
   90  if (__predict_true(atomic_load_relaxed(&f->nfaults) > 0))  in fault_inject()
   95  if (__predict_false(cnt % atomic_load_relaxed(&f->nth) == 0)) {  in fault_inject()
  200  args->nfaults = atomic_load_relaxed(&fault_global.nfaults);  in fault_ioc_getinfo()
  206  args->nfaults = atomic_load_relaxed(&f->nfaults);  in fault_ioc_getinfo()
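
In fault_inject() above, the configured stride (f->nth) and the counters are read with relaxed loads because the check is a heuristic: the exact interleaving of updates does not matter, only that the values are read without tearing. A hypothetical stand-alone version of the every-nth check follows, with assumed field and function names rather than NetBSD's actual fault(4) structures.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct fault_site {
        _Atomic unsigned long ncalls;   /* how many times this site was hit */
        _Atomic unsigned long nth;      /* fail every nth call; 0 = disabled */
    };

    /* Return true when this call should be turned into an injected failure. */
    static bool
    fault_should_fail(struct fault_site *f)
    {
        unsigned long cnt = atomic_fetch_add_explicit(&f->ncalls, 1,
            memory_order_relaxed) + 1;
        unsigned long nth = atomic_load_explicit(&f->nth, memory_order_relaxed);

        return nth != 0 && (cnt % nth) == 0;
    }
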
subr_pcq.c
  187  v = atomic_load_relaxed(&pcq->pcq_pc);  in pcq_put()
  219  const uint32_t v = atomic_load_relaxed(&pcq->pcq_pc);  in pcq_peek()
  240  v = atomic_load_relaxed(&pcq->pcq_pc);  in pcq_get()
  278  v = atomic_load_relaxed(&pcq->pcq_pc);  in pcq_get()
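
The pcq hits all read the single word pcq_pc, which packs both ring positions, so one relaxed 32-bit load yields a consistent snapshot of producer and consumer together. Here is a sketch of that packing trick; the names and the 16/16 split are assumptions for illustration, and the real queue also needs acquire/release ordering around the item slots, which is omitted here.

    #include <stdatomic.h>
    #include <stdint.h>

    /* Both ring indices live in one 32-bit word: high half producer, low half
     * consumer, so a single relaxed load snapshots them together. */
    static inline uint32_t pc_pack(uint16_t prod, uint16_t cons) {
        return ((uint32_t)prod << 16) | cons;
    }
    static inline uint16_t pc_producer(uint32_t v) { return (uint16_t)(v >> 16); }
    static inline uint16_t pc_consumer(uint32_t v) { return (uint16_t)(v & 0xffff); }

    struct ring {
        _Atomic uint32_t pc;                /* packed producer/consumer positions */
    };

    /* How many items are currently queued, taken from one coherent snapshot. */
    static uint16_t
    ring_count(struct ring *r)
    {
        const uint32_t v = atomic_load_explicit(&r->pc, memory_order_relaxed);
        return (uint16_t)(pc_producer(v) - pc_consumer(v));
    }
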
kern_entropy.c
   697  return atomic_load_relaxed(&E->epoch);  in entropy_epoch()
   709  return atomic_load_relaxed(&E->bitsneeded) == 0;  in entropy_ready()
   744  if (__predict_true(atomic_load_relaxed(&E->bitsneeded) == 0) &&  in entropy_account_cpu()
   745  __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&  in entropy_account_cpu()
  1060  if (__predict_false(atomic_load_relaxed(&E->bitsneeded) ||  in entropy_enter_intr()
  1061  atomic_load_relaxed(&entropy_depletion)) &&  in entropy_enter_intr()
  1178  cpu_bitspending = atomic_load_relaxed(&ec->ec_bitspending);  in entropy_pending_cpu()
  1179  cpu_samplespending = atomic_load_relaxed(&ec->ec_samplespending);  in entropy_pending_cpu()
  1335  if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||  in entropy_notify()
  1606  if (__predict_false(atomic_load_relaxed(  in entropy_extract()
  [all ...]
/netbsd-src/sys/net/npf/
npf_conn.c
  190  KASSERT(atomic_load_relaxed(&npf->conn_tracking) == CONN_TRACKING_OFF);  in npf_conn_fini()
  221  KASSERT(atomic_load_relaxed(&npf->conn_tracking)  in npf_conn_load()
  223  odb = atomic_load_relaxed(&npf->conn_db);  in npf_conn_load()
  265  if (atomic_load_relaxed(&npf->conn_tracking) != CONN_TRACKING_ON) {  in npf_conn_trackable_p()
  298  const uint32_t flags = atomic_load_relaxed(&con->c_flags);  in npf_conn_check()
  299  const unsigned ifid = atomic_load_relaxed(&con->c_ifid);  in npf_conn_check()
  341  KASSERT(npc->npc_proto == atomic_load_relaxed(&con->c_proto));  in npf_conn_lookup()
  406  if (atomic_load_relaxed(&con->c_flags) & CONN_GPASS) {  in npf_conn_inspect()
  532  KASSERT(atomic_load_relaxed(&con->c_refcnt) == 0);  in npf_conn_destroy()
  576  KASSERT(atomic_load_relaxed(&con->c_refcnt) > 0);  in npf_conn_setnat()
  [all ...]
npf_alg.c
  230  count = atomic_load_relaxed(&aset->alg_count);  in npf_alg_match()
  235  match_func = atomic_load_relaxed(&f->match);  in npf_alg_match()
  263  count = atomic_load_relaxed(&aset->alg_count);  in npf_alg_exec()
  268  translate_func = atomic_load_relaxed(&f->translate);  in npf_alg_exec()
  303  count = atomic_load_relaxed(&aset->alg_count);  in npf_alg_conn()
  308  inspect_func = atomic_load_relaxed(&f->inspect);  in npf_alg_conn()
  327  if ((destroy_func = atomic_load_relaxed(&f->destroy)) != NULL) {  in npf_alg_destroy()
npf_rproc.c
  158  if (atomic_load_relaxed(&ext->ext_refcnt)) {  in npf_ext_unregister()
  163  if (atomic_load_relaxed(&ext->ext_refcnt)) {  in npf_ext_unregister()
  331  KASSERT(atomic_load_relaxed(&rp->rp_refcnt) > 0);  in npf_rproc_release()
  372  KASSERT(atomic_load_relaxed(&rp->rp_refcnt) > 0);  in npf_rproc_run()
  378  KASSERT(atomic_load_relaxed(&ext->ext_refcnt) > 0);  in npf_rproc_run()
npf_conndb.c
  166  npf_conndb_t *cd = atomic_load_relaxed(&npf->conn_db);  in npf_conndb_lookup()
  251  head = atomic_load_relaxed(&cd->cd_new);  in npf_conndb_enqueue()
  268  npf_conn_t *next = atomic_load_relaxed(&con->c_next); // union  in npf_conndb_update()
  443  const unsigned refcnt = atomic_load_relaxed(&con->c_refcnt);  in npf_conndb_gc()
/netbsd-src/sys/external/bsd/common/linux/
linux_tasklet.c
  227  KASSERT(atomic_load_relaxed(&tasklet->tl_state) &  in tasklet_softintr()
  261  KASSERT(atomic_load_relaxed(&tasklet->tl_state) &  in tasklet_softintr()
  286  ostate = atomic_load_relaxed(&tasklet->tl_state);  in tasklet_queue_schedule()
  312  KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED);  in tasklet_queue_enqueue()
  459  while (atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED)  in tasklet_kill()
  492  return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING;  in tasklet_is_locked()
  509  state = atomic_load_relaxed(&tasklet->tl_state);  in tasklet_trylock()
  532  KASSERT(atomic_load_relaxed(&tasklet->tl_state) & TASKLET_RUNNING);  in tasklet_unlock()
  635  disablecount = atomic_load_relaxed(&tasklet->tl_disablecount);  in __tasklet_is_enabled()
  651  return atomic_load_relaxed(&tasklet->tl_state) & TASKLET_SCHEDULED;  in __tasklet_is_scheduled()
/netbsd-src/sys/external/bsd/drm2/include/linux/
kref.h
   69  count = atomic_load_relaxed(&kref->kr_count);  in kref_get_unless_zero()
   86  old = atomic_load_relaxed(&kref->kr_count);  in kref_sub()
  110  old = atomic_load_relaxed(&kref->kr_count);  in kref_put_lock()
  144  old = atomic_load_relaxed(&kref->kr_count);  in kref_put_mutex()
  166  return atomic_load_relaxed(&kref->kr_count);  in kref_read()
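
The kref.h hits follow the read-then-CAS refcount idiom: a relaxed load of kr_count seeds a compare-and-swap loop that decides the outcome. Below is a sketch of the get-unless-zero variant in C11 atomics; the type and function names are illustrative rather than the drm2 kref API, and the relaxed ordering assumes the caller already keeps the object's memory valid (for example via RCU), so no extra ordering is needed at this point.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct refobj {
        _Atomic unsigned refcnt;
    };

    /* Take a reference only if the count is still nonzero. */
    static bool
    ref_get_unless_zero(struct refobj *o)
    {
        unsigned old = atomic_load_explicit(&o->refcnt, memory_order_relaxed);

        do {
            if (old == 0)
                return false;               /* object is dying; do not resurrect it */
        } while (!atomic_compare_exchange_weak_explicit(&o->refcnt, &old,
            old + 1, memory_order_relaxed, memory_order_relaxed));

        return true;
    }
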
/netbsd-src/external/mpl/dhcp/bind/dist/lib/isc/
quota.c
   70  return (atomic_load_relaxed(&quota->max));  in isc_quota_getmax()
   76  return (atomic_load_relaxed(&quota->soft));  in isc_quota_getsoft()
   82  return (atomic_load_relaxed(&quota->used));  in isc_quota_getused()
  135  if (atomic_load_relaxed(&quota->waiting) > 0) {  in quota_release()
/netbsd-src/external/mpl/bind/dist/lib/isc/
quota.c
   53  return atomic_load_relaxed(&quota->max);  in isc_quota_destroy()
   59  return atomic_load_relaxed(&quota->soft);  in isc_quota_soft()
   65  return atomic_load_relaxed(&quota->used);  in isc_quota_max()
   94  uint_fast32_t max = atomic_load_relaxed(&quota->max);  in quota_reserve()
  112  uint_fast32_t soft = atomic_load_relaxed(&quota->soft);  in enqueue()
/netbsd-src/sys/external/bsd/compiler_rt/dist/lib/tsan/rtl/
tsan_sync.h
   88  return atomic_load_relaxed(&flags) & f;  in IsFlagSet()
   92  atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);  in SetFlags()
   99  u32 current = atomic_load_relaxed(&flags);  in UpdateFlags()
/netbsd-src/external/gpl3/gcc.old/dist/libsanitizer/tsan/
tsan_sync.h
   86  return atomic_load_relaxed(&flags) & f;  in IsFlagSet()
   90  atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);  in SetFlags()
   97  u32 current = atomic_load_relaxed(&flags);  in UpdateFlags()
/netbsd-src/external/gpl3/gcc/dist/libsanitizer/tsan/
tsan_sync.h
   86  return atomic_load_relaxed(&flags) & f;  in IsFlagSet()
   90  atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);  in SetFlags()
   97  u32 current = atomic_load_relaxed(&flags);  in UpdateFlags()
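
All three tsan_sync.h copies update the flags word with a separate relaxed load and relaxed store rather than an atomic read-modify-write, which is only correct when writers are serialized by something else, such as a per-object lock. The small illustration below shows that constraint; the pthread mutex is an assumption standing in for whatever external serialization the real code relies on.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    struct syncvar {
        pthread_mutex_t mtx;        /* serializes all writers of flags; assumed initialized */
        _Atomic uint32_t flags;
    };

    /* Readers may peek at individual bits with a relaxed load at any time. */
    static int
    is_flag_set(struct syncvar *s, uint32_t f)
    {
        return (atomic_load_explicit(&s->flags, memory_order_relaxed) & f) != 0;
    }

    /* The load/modify/store below is NOT atomic as a whole; it is correct only
     * because mtx keeps concurrent writers out, mirroring SetFlags() above. */
    static void
    set_flags(struct syncvar *s, uint32_t f)
    {
        pthread_mutex_lock(&s->mtx);
        uint32_t cur = atomic_load_explicit(&s->flags, memory_order_relaxed);
        atomic_store_explicit(&s->flags, cur | f, memory_order_relaxed);
        pthread_mutex_unlock(&s->mtx);
    }
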
/netbsd-src/external/mpl/bind/dist/lib/dns/
badcache.c
|
/netbsd-src/external/mpl/dhcp/bind/dist/lib/dns/
badcache.c
  141  if (atomic_load_relaxed(&bc->count) > bc->size * 8) {  in badcache_resize()
  143  } else if (atomic_load_relaxed(&bc->count) < bc->size * 2 &&  in badcache_resize()
  186  for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {  in badcache_resize()
  314  if (atomic_load_relaxed(&bc->count) == 0) {  in dns_badcache_find()
  377  for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {  in dns_badcache_flush()
  453  for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {  in dns_badcache_flushtree()
  498  for (i = 0; atomic_load_relaxed(&bc->count) > 0 && i < bc->size; i++) {  in dns_badcache_print()
/netbsd-src/sys/external/bsd/drm2/linux/
linux_dma_resv.c
   484  shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0;  in dma_resv_get_shared_reader()
   842  shared[i] = atomic_load_relaxed(&list->shared[i]);  in dma_resv_get_fences_rcu()
   858  if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)  in dma_resv_get_fences_rcu()
   951  fence = atomic_load_relaxed(&src_list->shared[i]);  in dma_resv_copy_fences()
  1077  fence = atomic_load_relaxed(&list->shared[i]);  in dma_resv_test_signaled_rcu()
  1167  fence = atomic_load_relaxed(&list->shared[i]);  in dma_resv_wait_timeout_rcu()
  1333  fence = atomic_load_relaxed(&list->shared[i]);  in dma_resv_do_poll()
  1372  fence = atomic_load_relaxed(&list->shared[i]);  in dma_resv_do_poll()
/netbsd-src/external/gpl3/gcc/dist/libsanitizer/sanitizer_common/
sanitizer_quarantine.h
   97  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }  in GetSize()
   99  return atomic_load_relaxed(&max_cache_size_);  in GetCacheSize()
  121  Recycle(atomic_load_relaxed(&min_size_), cb);  in Drain()
  214  return atomic_load_relaxed(&size_);  in Size()
sanitizer_mutex.h
  167  u64 state = atomic_load_relaxed(&state_);  in Lock()
  214  u64 state = atomic_load_relaxed(&state_);  in Unlock()
  240  u64 state = atomic_load_relaxed(&state_);  in ReadLock()
  278  u64 state = atomic_load_relaxed(&state_);  in ReadUnlock()
/netbsd-src/sys/external/bsd/compiler_rt/dist/lib/sanitizer_common/
sanitizer_quarantine.h
   98  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }  in GetSize()
  100  return atomic_load_relaxed(&max_cache_size_);  in GetCacheSize()
  122  Recycle(atomic_load_relaxed(&min_size_), cb);  in Drain()
  214  return atomic_load_relaxed(&size_);  in Size()
/netbsd-src/external/gpl3/gcc.old/dist/libsanitizer/sanitizer_common/
sanitizer_quarantine.h
   96  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }  in GetSize()
   98  return atomic_load_relaxed(&max_cache_size_);  in GetCacheSize()
  120  Recycle(atomic_load_relaxed(&min_size_), cb);  in Drain()
  212  return atomic_load_relaxed(&size_);  in Size()
/netbsd-src/sys/arch/x86/x86/
x86_tlb.c
   95  #define TP_GET_DONE(tp) (atomic_load_relaxed(&(tp)->tp_store[TP_DONE]) & 1)
  106  uintptr_t v = atomic_load_relaxed(&(tp)->tp_store[TP_DONE]); \
  415  KASSERT(atomic_load_relaxed(&pmap_tlb_packet) != ts);  in pmap_tlb_shootnow()
  425  } while (atomic_load_relaxed(&pmap_tlb_packet) != NULL);  in pmap_tlb_shootnow()
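
pmap_tlb_shootnow() above busy-waits on a relaxed load until the global packet pointer is cleared: relaxed polling keeps the spin cheap, and any ordering the waiter needs can be re-established once the condition has been observed. A minimal version of such a wait loop is sketched below; the function name and the trailing acquire fence are illustrative assumptions, not the x86_tlb.c code.

    #include <stdatomic.h>
    #include <stddef.h>

    /* Busy-wait until *slot has been cleared by the remote side. */
    static void
    wait_until_cleared(void *_Atomic *slot)
    {
        while (atomic_load_explicit(slot, memory_order_relaxed) != NULL)
            ;                       /* cheap relaxed polling */

        /* Pairs with the release operation that cleared the slot, so everything
         * the remote side did before clearing it is visible after the wait. */
        atomic_thread_fence(memory_order_acquire);
    }
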
/netbsd-src/sys/dev/pci/ixgbe/
ixgbe_netbsd.h
   61  atomic_load_relaxed(&((evp)->ev_count))
   66  atomic_load_relaxed(&((evp)->ev_count)) + (val))
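
These event-counter macros sample a 64-bit counter with a relaxed load so readers get an untorn value of a counter that is updated elsewhere, while imposing no memory ordering. A self-contained sketch of the same idea follows; the EVC_* names and the struct are illustrative, not the ixgbe macros, and the load-add-store update assumes a single updater per counter (otherwise an atomic add would be required).

    #include <stdatomic.h>
    #include <stdint.h>

    struct evcounter {
        _Atomic uint64_t count;
    };

    /* Relaxed accessors: readers only need an untorn value, and the sole
     * updater does not race with itself, so no ordering is required. */
    #define EVC_LOAD(evp)       atomic_load_explicit(&(evp)->count, memory_order_relaxed)
    #define EVC_STORE(evp, v)   atomic_store_explicit(&(evp)->count, (v), memory_order_relaxed)
    #define EVC_ADD(evp, val)   EVC_STORE((evp), EVC_LOAD(evp) + (val))
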