/*	$OpenBSD: lock_machdep.c,v 1.12 2024/04/03 19:30:59 gkoehler Exp $	*/

/*
 * Copyright (c) 2021 George Koehler <gkoehler@openbsd.org>
 * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <machine/cpu.h>

#include <ddb/db_output.h>

/*
 * If __ppc_lock() crosses a page boundary in the kernel text, then it
 * may cause a page fault (on G5 with ppc_nobat), and pte_spill_r()
 * would recursively call __ppc_lock().  The lock must be in a valid
 * state when the page fault happens.  We acquire or release the lock
 * with a 32-bit atomic write to mpl_cpu, so the lock is always in a
 * valid state, before or after the write.
 *
 * Acquired the lock:	mpl->mpl_cpu == curcpu()
 * Released the lock:	mpl->mpl_cpu == NULL
 */

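/*
 * For reference, a rough sketch of the lock structure assumed by this
 * file (the real declaration lives in <machine/mplock.h>; the exact
 * types and qualifiers may differ):
 *
 *	struct __ppc_lock {
 *		struct cpu_info *volatile mpl_cpu;	lock holder, NULL = free
 *		long mpl_count;				extra recursive holds
 *	};
 */
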
#if defined(MP_LOCKDEBUG)
#ifndef DDB
#error "MP_LOCKDEBUG requires DDB"
#endif

/* CPU-dependent timing, this needs to be settable from ddb. */
extern int __mp_lock_spinout;
#endif

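/*
 * With MP_LOCKDEBUG, a lock that spins too long drops into ddb below.
 * As a hypothetical example (ddb "write" command per ddb(4); symbol
 * lookup assumes the kernel was built with ddb symbols), the timeout
 * could then be raised from the debugger prompt:
 *
 *	ddb> write __mp_lock_spinout 0x10000000
 */
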
static __inline void
__ppc_lock_spin(struct __ppc_lock *mpl)
{
#ifndef MP_LOCKDEBUG
	while (mpl->mpl_cpu != NULL)
		CPU_BUSY_CYCLE();
#else
	int nticks = __mp_lock_spinout;

	while (mpl->mpl_cpu != NULL && --nticks > 0)
		CPU_BUSY_CYCLE();

	if (nticks == 0) {
		db_printf("__ppc_lock(%p): lock spun out\n", mpl);
		db_enter();
	}
#endif
}

void
__ppc_lock(struct __ppc_lock *mpl)
{
	/*
	 * Please notice that mpl_count stays at 0 for the first lock.
	 * A page fault might recursively call __ppc_lock() after we
	 * set mpl_cpu, but before we can increase mpl_count.
	 *
	 * After we acquire the lock, we need a "bc; isync" memory
	 * barrier, but we might not reach the barrier before the next
	 * page fault.  Then the fault's recursive __ppc_lock() must
	 * have a barrier.  membar_enter() is just "isync" and must
	 * come after a conditional branch for holding the lock.
	 */

	while (1) {
		struct cpu_info *owner = mpl->mpl_cpu;
		struct cpu_info *ci = curcpu();

		if (owner == NULL) {
			/* Try to acquire the lock. */
			if (atomic_cas_ptr(&mpl->mpl_cpu, NULL, ci) == NULL) {
				membar_enter();
				break;
			}
		} else if (owner == ci) {
			/* We hold the lock, but might need a barrier. */
			membar_enter();
			mpl->mpl_count++;
			break;
		}

		__ppc_lock_spin(mpl);
	}
}
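
/*
 * Illustration (informal, derived from the code above and from
 * __ppc_unlock() below): when one CPU takes the lock recursively,
 * the fields change as
 *
 *	__ppc_lock(mpl)		mpl_cpu = curcpu(), mpl_count = 0
 *	__ppc_lock(mpl)		mpl_count = 1
 *	__ppc_unlock(mpl)	mpl_count = 0
 *	__ppc_unlock(mpl)	mpl_cpu = NULL (lock released)
 */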

void
__ppc_unlock(struct __ppc_lock *mpl)
{
#ifdef MP_LOCKDEBUG
	if (mpl->mpl_cpu != curcpu()) {
		db_printf("__ppc_unlock(%p): lock not held\n", mpl);
		db_enter();
	}
#endif

	/*
	 * If we get a page fault after membar_exit() and before
	 * releasing the lock, then the recursive call to
	 * __ppc_unlock() must also membar_exit().
	 */
	if (mpl->mpl_count == 0) {
		membar_exit();
		mpl->mpl_cpu = NULL;	/* Release the lock. */
	} else {
		membar_exit();
		mpl->mpl_count--;
	}
}