/*
 * Copyright (c) 2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/atomic.h>

#if !defined(__LP64__) && !defined(__mips_n32) && \
    !defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
    !defined(HAS_EMULATED_ATOMIC64)
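/*
 * Fallback path: on 32-bit platforms that lack native (or otherwise
 * provided) 64-bit atomic instructions, the 64-bit atomics below are
 * emulated by serializing every operation through one global mutex.
 */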

#ifdef _KERNEL
#include <sys/kernel.h>

struct mtx atomic_mtx;
MTX_SYSINIT(atomic, &atomic_mtx, "atomic", MTX_DEF);
#else
#include <pthread.h>

#define	mtx_lock(lock)		pthread_mutex_lock(lock)
#define	mtx_unlock(lock)	pthread_mutex_unlock(lock)

static pthread_mutex_t atomic_mtx;

static __attribute__((constructor)) void
atomic_init(void)
{
	pthread_mutex_init(&atomic_mtx, NULL);
}
#endif

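/* Atomically add delta (which may be negative) to *target. */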
void
atomic_add_64(volatile uint64_t *target, int64_t delta)
{

	mtx_lock(&atomic_mtx);
	*target += delta;
	mtx_unlock(&atomic_mtx);
}

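/* Atomically decrement *target by one. */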
void
atomic_dec_64(volatile uint64_t *target)
{

	mtx_lock(&atomic_mtx);
	*target -= 1;
	mtx_unlock(&atomic_mtx);
}

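/* Atomically replace *a with value and return the previous contents. */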
uint64_t
atomic_swap_64(volatile uint64_t *a, uint64_t value)
{
	uint64_t ret;

	mtx_lock(&atomic_mtx);
	ret = *a;
	*a = value;
	mtx_unlock(&atomic_mtx);
	return (ret);
}

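/* Atomically read *a; taking the lock prevents torn 64-bit reads. */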
uint64_t
atomic_load_64(volatile uint64_t *a)
{
	uint64_t ret;

	mtx_lock(&atomic_mtx);
	ret = *a;
	mtx_unlock(&atomic_mtx);
	return (ret);
}

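/* Atomically add delta to *target and return the new value. */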
uint64_t
atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
{
	uint64_t newval;

	mtx_lock(&atomic_mtx);
	newval = (*target += delta);
	mtx_unlock(&atomic_mtx);
	return (newval);
}

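/*
 * Atomic compare-and-swap: if *target equals cmp, set it to newval.
 * Returns the old value, so callers detect success by checking that
 * the return value equals cmp.
 */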
uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t oldval;

	mtx_lock(&atomic_mtx);
	oldval = *target;
	if (oldval == cmp)
		*target = newval;
	mtx_unlock(&atomic_mtx);
	return (oldval);
}
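
/*
 * Illustrative sketch (not part of the original file): the usual retry
 * loop built on atomic_cas_64, here incrementing a hypothetical 64-bit
 * variable "v". The CAS succeeded when the returned old value matches
 * the value we read, i.e. nothing changed in between.
 *
 *	uint64_t old;
 *	do {
 *		old = atomic_load_64(&v);
 *	} while (atomic_cas_64(&v, old, old + 1) != old);
 */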
#endif