/*	$NetBSD: linux_atomic64.c,v 1.3 2018/08/27 15:11:17 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_atomic64.c,v 1.3 2018/08/27 15:11:17 riastradh Exp $");

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/lock.h>

#include <linux/atomic.h>

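/*
 * Emulation of Linux's 64-bit atomics.  When the platform provides
 * native 64-bit atomic operations (__HAVE_ATOMIC64_OPS), there is no
 * global state to manage here, so initialization and teardown are
 * no-ops.  Otherwise, every atomic64 is protected by one spin lock
 * drawn from a fixed table, defined below.
 */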
#ifdef __HAVE_ATOMIC64_OPS

int
linux_atomic64_init(void)
{
	return 0;
}

void
linux_atomic64_fini(void)
{
}

#else

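/*
 * Table of locks, one per cache line, filling one page.  Each slot
 * pairs a spin lock with a generation counter: the counter is odd
 * while an update is in progress and even otherwise, which lets
 * atomic64_read() run without taking the lock, seqlock-style.  An
 * atomic64 is mapped to a slot by hashing its address.
 */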
static struct {
	kmutex_t	lock;
	uint32_t	gen;	/* for unlocked read */
	char		pad[CACHE_LINE_SIZE -
			    sizeof(kmutex_t) - sizeof(uint32_t)];
} atomic64_tab[PAGE_SIZE/CACHE_LINE_SIZE] __cacheline_aligned;
CTASSERT(sizeof(atomic64_tab) == PAGE_SIZE);
CTASSERT(sizeof(atomic64_tab[0]) == CACHE_LINE_SIZE);

int
linux_atomic64_init(void)
{
	size_t i;

	for (i = 0; i < __arraycount(atomic64_tab); i++) {
		mutex_init(&atomic64_tab[i].lock, MUTEX_DEFAULT, IPL_HIGH);
		atomic64_tab[i].gen = 0;
	}

	return 0;
}

void
linux_atomic64_fini(void)
{
	size_t i;

	for (i = 0; i < __arraycount(atomic64_tab); i++) {
		KASSERT((atomic64_tab[i].gen & 1) == 0);
		mutex_destroy(&atomic64_tab[i].lock);
	}
}

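/*
 * Map an atomic64's address to a slot in atomic64_tab: discard the
 * cache-line offset bits, so objects packed into one cache line share
 * a slot, then reduce modulo the table size.
 */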
static inline size_t
atomic64_hash(const struct atomic64 *a)
{

	return ((uintptr_t)a >> ilog2(CACHE_LINE_SIZE)) %
	    __arraycount(atomic64_tab);
}

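/*
 * Acquire a's slot lock and mark the generation odd so that unlocked
 * readers can tell an update is in progress.  The producer barrier
 * orders the store to gen before the caller's subsequent store to the
 * value.
 */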
static void
atomic64_lock(struct atomic64 *a)
{
	size_t i = atomic64_hash(a);

	mutex_spin_enter(&atomic64_tab[i].lock);
	KASSERT((atomic64_tab[i].gen & 1) == 0);
	atomic64_tab[i].gen |= 1;
	membar_producer();
}

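/*
 * Release a's slot: make the caller's update to the value visible
 * before bumping the generation back to an even number, so a reader
 * who observes the new generation also observes the new value.
 */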
static void
atomic64_unlock(struct atomic64 *a)
{
	size_t i = atomic64_hash(a);

	KASSERT(mutex_owned(&atomic64_tab[i].lock));
	KASSERT((atomic64_tab[i].gen & 1) == 1);

	membar_producer();
	atomic64_tab[i].gen |= 1; /* paranoia */
	atomic64_tab[i].gen++;
	mutex_spin_exit(&atomic64_tab[i].lock);
}

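/*
 * Read a 64-bit value without taking the slot lock: wait for the
 * generation to be even (no writer in progress), read the value, and
 * retry if the generation changed in the meantime, in which case a
 * writer raced with us and the value may be torn.
 */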
uint64_t
atomic64_read(const struct atomic64 *a)
{
	size_t i = atomic64_hash(a);
	uint32_t gen;
	uint64_t value;

	do {
		while (__predict_false((gen = atomic64_tab[i].gen) & 1))
			SPINLOCK_BACKOFF_HOOK;
		membar_consumer();
		value = a->a_v;
		membar_consumer();
	} while (__predict_false(atomic64_tab[i].gen != gen));

	return value;
}

void
atomic64_set(struct atomic64 *a, uint64_t value)
{

	atomic64_lock(a);
	a->a_v = value;
	atomic64_unlock(a);
}

void
atomic64_add(int64_t delta, struct atomic64 *a)
{

	atomic64_lock(a);
	a->a_v += delta;
	atomic64_unlock(a);
}

void
atomic64_sub(int64_t delta, struct atomic64 *a)
{

	atomic64_lock(a);
	a->a_v -= delta;
	atomic64_unlock(a);
}

int64_t
atomic64_add_return(int64_t delta, struct atomic64 *a)
{
	int64_t v;

	atomic64_lock(a);
	v = (int64_t)(a->a_v += delta);
	atomic64_unlock(a);

	return v;
}

uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	atomic64_lock(a);
	old = a->a_v;
	a->a_v = new;
	atomic64_unlock(a);

	return old;
}

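/*
 * Compare-and-exchange with Linux semantics: store new only if the
 * current value equals expect, and return the old value either way,
 * so the caller can distinguish success (old == expect) from failure.
 */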
uint64_t
atomic64_cmpxchg(struct atomic64 *a, uint64_t expect, uint64_t new)
{
	uint64_t old;

	atomic64_lock(a);
	old = a->a_v;
	if (old == expect)
		a->a_v = new;
	atomic64_unlock(a);

	return old;
}

#endif
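/*
 * Example usage (editorial sketch, not part of the original file):
 * the classic compare-and-swap retry loop built on this API, here a
 * saturating increment capped at a hypothetical limit.
 *
 *	static uint64_t
 *	example_saturating_inc(struct atomic64 *a, uint64_t limit)
 *	{
 *		uint64_t old, new;
 *
 *		do {
 *			old = atomic64_read(a);
 *			if (old >= limit)
 *				return old;	// already at the cap
 *			new = old + 1;
 *		} while (atomic64_cmpxchg(a, old, new) != old);
 *
 *		return new;
 *	}
 */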