/* Public domain. */

#ifndef _LINUX_SPINLOCK_H
#define _LINUX_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/spinlock_types.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>

static inline void
spin_lock_irqsave(struct mutex *mtxp, __unused unsigned long flags)
{
	mtx_enter(mtxp);
}

static inline void
spin_lock_irqsave_nested(struct mutex *mtxp, __unused unsigned long flags,
    __unused int subclass)
{
	mtx_enter(mtxp);
}

static inline void
spin_unlock_irqrestore(struct mutex *mtxp, __unused unsigned long flags)
{
	mtx_leave(mtxp);
}

#define spin_lock(mtxp)			mtx_enter(mtxp)
#define spin_lock_nested(mtxp, l)	mtx_enter(mtxp)
#define spin_unlock(mtxp)		mtx_leave(mtxp)
#define spin_lock_irq(mtxp)		mtx_enter(mtxp)
#define spin_unlock_irq(mtxp)		mtx_leave(mtxp)
#define assert_spin_locked(mtxp)	MUTEX_ASSERT_LOCKED(mtxp)
#define spin_trylock_irq(mtxp)		mtx_enter_try(mtxp)

#define down_read(rwl)			rw_enter_read(rwl)
#define down_read_trylock(rwl)		(rw_enter(rwl, RW_READ | RW_NOSLEEP) == 0)
#define up_read(rwl)			rw_exit_read(rwl)
#define down_write(rwl)			rw_enter_write(rwl)
#define up_write(rwl)			rw_exit_write(rwl)
#define downgrade_write(rwl)		rw_enter(rwl, RW_DOWNGRADE)
#define read_lock(rwl)			rw_enter_read(rwl)
#define read_unlock(rwl)		rw_exit_read(rwl)
#define write_lock(rwl)			rw_enter_write(rwl)
#define write_unlock(rwl)		rw_exit_write(rwl)

#endif
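
/*
 * Usage sketch (hypothetical caller, not part of the header above):
 * Linux-style driver code keeps its spin_lock_irqsave() calling
 * convention, and the shims above turn each call into the matching
 * mutex(9) operation; the rwsem/rwlock macros map to rwlock(9) the
 * same way.  The names drm_dev_lock and the IPL_TTY interrupt level
 * below are illustrative assumptions, not something this header
 * provides.
 *
 *	struct mutex drm_dev_lock;
 *	unsigned long flags;
 *
 *	mtx_init(&drm_dev_lock, IPL_TTY);		initialize the mutex(9)
 *	spin_lock_irqsave(&drm_dev_lock, flags);	calls mtx_enter()
 *	... critical section ...
 *	spin_unlock_irqrestore(&drm_dev_lock, flags);	calls mtx_leave()
 */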