/*	$NetBSD: refcount.h,v 1.3 2021/12/19 11:52:08 riastradh Exp $	*/

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_LINUX_REFCOUNT_H_
#define	_LINUX_REFCOUNT_H_

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
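
/*
 * Subset of the Linux kernel refcount_t API, implemented on top of the
 * atomic_t compatibility layer.  Unlike the native Linux implementation,
 * these operations do not saturate on overflow; underflow is caught only
 * by KASSERT in DIAGNOSTIC kernels.
 */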
typedef struct refcount refcount_t;

struct refcount {
	atomic_t	rc_count;
};
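
/*
 * refcount_set(rc, n)
 *
 *	Set the reference count to n.  Intended for initialization; this is
 *	a plain store, not an atomic read/modify/write, so it must not race
 *	with concurrent updates of the count.
 */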
static inline void
refcount_set(struct refcount *rc, int n)
{
	atomic_set(&rc->rc_count, n);
}
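
/*
 * refcount_inc(rc)
 *
 *	Increment the reference count.  The caller must already hold a
 *	reference; use refcount_inc_not_zero if the count may be zero.
 */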
static inline void
refcount_inc(struct refcount *rc)
{
	atomic_inc(&rc->rc_count);
}
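
/*
 * refcount_inc_not_zero(rc)
 *
 *	Increment the reference count unless it is zero.  Returns nonzero
 *	(the previous count) on success, or 0 if the count was already
 *	zero, in which case the object may already be being destroyed.
 */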
static inline int __must_check
refcount_inc_not_zero(struct refcount *rc)
{
	unsigned old, new;

	do {
		old = atomic_read(&rc->rc_count);
		if (old == 0)
			break;
		new = old + 1;
	} while (atomic_cmpxchg(&rc->rc_count, old, new) != old);

	return old;
}
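
/*
 * refcount_dec_and_test(rc)
 *
 *	Decrement the reference count.  Returns true if this released the
 *	last reference, i.e. the count has dropped to zero and the caller
 *	is responsible for destroying the object.  Asserts that the count
 *	was nonzero before the decrement.
 */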
static inline bool __must_check
refcount_dec_and_test(struct refcount *rc)
{
	unsigned old, new;

	do {
		old = atomic_read(&rc->rc_count);
		KASSERT(old);
		new = old - 1;
	} while (atomic_cmpxchg(&rc->rc_count, old, new) != old);

	return old == 1;
}
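
/*
 * refcount_dec_and_lock_irqsave(rc, lock, flagsp)
 *
 *	Decrement the reference count.  If it drops to zero, acquire the
 *	spin lock with interrupt state saved in *flagsp and return true
 *	with the lock held; otherwise return false without touching the
 *	lock.
 */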
static inline bool __must_check
refcount_dec_and_lock_irqsave(struct refcount *rc, struct spinlock *lock,
    unsigned long *flagsp)
{

	return atomic_dec_and_lock_irqsave(&rc->rc_count, lock, *flagsp);
}
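
/*
 * refcount_dec_and_mutex_lock(rc, lock)
 *
 *	Decrement the reference count.  If this would drop it to zero,
 *	acquire the mutex before the final decrement and return true with
 *	the mutex held once the count reaches zero; otherwise return false
 *	with the mutex released.  Decrements that do not reach zero never
 *	take the mutex.
 */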
static inline bool __must_check
refcount_dec_and_mutex_lock(struct refcount *rc, struct mutex *lock)
{
	unsigned old, new;

	do {
		old = atomic_read(&rc->rc_count);
		KASSERT(old);
		if (old == 1) {
			/*
			 * Last reference: take the mutex before the
			 * final decrement so the transition to zero
			 * happens under the lock.
			 */
			mutex_lock(lock);
			if (atomic_dec_return(&rc->rc_count) == 0)
				return true;
			/* Someone else gained a reference meanwhile.  */
			mutex_unlock(lock);
			return false;
		}
		new = old - 1;
	} while (atomic_cmpxchg(&rc->rc_count, old, new) != old);

	KASSERT(old != 1);
	KASSERT(new != 0);
	return false;
}
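
/*
 * Example usage (illustrative sketch only; the frob type and its
 * init/get/put/destroy helpers are hypothetical, not part of this
 * header):
 *
 *	struct frob {
 *		refcount_t	f_refcnt;
 *	};
 *
 *	void
 *	frob_init(struct frob *f)
 *	{
 *		refcount_set(&f->f_refcnt, 1);
 *	}
 *
 *	struct frob *
 *	frob_get(struct frob *f)
 *	{
 *		if (!refcount_inc_not_zero(&f->f_refcnt))
 *			return NULL;	 // already being destroyed
 *		return f;
 *	}
 *
 *	void
 *	frob_put(struct frob *f)
 *	{
 *		if (refcount_dec_and_test(&f->f_refcnt))
 *			frob_destroy(f);
 *	}
 */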

#endif	/* _LINUX_REFCOUNT_H_ */