/* $OpenBSD: kref.h,v 1.5 2023/01/01 01:34:58 jsg Exp $ */
/*
 * Copyright (c) 2015 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _LINUX_KREF_H
#define _LINUX_KREF_H

#include <sys/types.h>
#include <sys/rwlock.h>
#include <sys/atomic.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>

struct kref {
	uint32_t refcount;
};

static inline void
kref_init(struct kref *ref)
{
	atomic_set(&ref->refcount, 1);
}

static inline unsigned int
kref_read(const struct kref *ref)
{
	return atomic_read(&ref->refcount);
}

static inline void
kref_get(struct kref *ref)
{
	atomic_inc_int(&ref->refcount);
}

static inline int
kref_get_unless_zero(struct kref *ref)
{
	if (ref->refcount != 0) {
		atomic_inc_int(&ref->refcount);
		return (1);
	} else {
		return (0);
	}
}

static inline int
kref_put(struct kref *ref, void (*release)(struct kref *ref))
{
	if (atomic_dec_int_nv(&ref->refcount) == 0) {
		release(ref);
		return 1;
	}
	return 0;
}

static inline int
kref_put_mutex(struct kref *kref, void (*release)(struct kref *kref),
    struct rwlock *lock)
{
	if (!atomic_add_unless(&kref->refcount, -1, 1)) {
		rw_enter_write(lock);
		if (likely(atomic_dec_and_test(&kref->refcount))) {
			release(kref);
			return 1;
		}
		rw_exit_write(lock);
		return 0;
	}

	return 0;
}

static inline int
kref_put_lock(struct kref *kref, void (*release)(struct kref *kref),
    struct mutex *lock)
{
	if (!atomic_add_unless(&kref->refcount, -1, 1)) {
		mtx_enter(lock);
		if (likely(atomic_dec_and_test(&kref->refcount))) {
			release(kref);
			return 1;
		}
		mtx_leave(lock);
		return 0;
	}

	return 0;
}

#endif
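
/*
 * Example usage (an illustrative sketch appended for this document, not
 * part of the original header): a hypothetical refcounted object
 * "struct foo" with a release callback, assuming container_of() and
 * kfree() are available from the surrounding drm/Linux compat headers.
 *
 *	struct foo {
 *		struct kref ref;
 *		int data;
 *	};
 *
 *	static void
 *	foo_release(struct kref *ref)
 *	{
 *		struct foo *f = container_of(ref, struct foo, ref);
 *		kfree(f);
 *	}
 *
 * After kref_init(&f->ref) the count is 1; kref_get(&f->ref) takes an
 * additional reference, and kref_put(&f->ref, foo_release) drops one,
 * invoking foo_release() once the count reaches zero.
 */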